Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 68
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 10
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 40
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 189
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 492
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3394
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 246
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpkg.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac.h | 24
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 162
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dprtc.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h | 537
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpsw.c | 1581
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpsw.h | 755
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Makefile | 3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 1340
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 129
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 82
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 40
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 16
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 155
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.h | 20
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 229
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 21
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 178
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 17
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 5
39 files changed, 9057 insertions, 772 deletions
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 3f9175bdce77..2d1abdd58fab 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -27,6 +27,7 @@ config FEC
default ARCH_MXC || SOC_IMX28 if ARM
select CRC32
select PHYLIB
+ imply NET_SELFTESTS
imply PTP_1588_CLOCK
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 67c436400352..de7b31842233 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
-obj-$(CONFIG_FSL_ENETC) += enetc/
-obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
-obj-$(CONFIG_FSL_ENETC_VF) += enetc/
+obj-y += enetc/
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 720dc99bd1fc..177c020bf34a 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3081,7 +3081,7 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags)
{
struct xdp_frame *xdpf;
- int i, err, drops = 0;
+ int i, nxmit = 0;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -3091,14 +3091,12 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
for (i = 0; i < n; i++) {
xdpf = frames[i];
- err = dpaa_xdp_xmit_frame(net_dev, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (dpaa_xdp_xmit_frame(net_dev, xdpf))
+ break;
+ nxmit++;
}
- return n - drops;
+ return nxmit;
}
static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
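
The dpaa_xdp_xmit() hunk above (and the matching dpaa2_eth_xdp_xmit() hunk further down) moves to the reworked .ndo_xdp_xmit contract: the driver reports how many frames it actually queued and stops at the first failure, leaving ownership of the unsent frames with the caller instead of freeing them with xdp_return_frame_rx_napi() itself. A minimal caller-side sketch of that contract, using a hypothetical example_xdp_flush() helper that is not part of this patch:

#include <linux/netdevice.h>
#include <net/xdp.h>

static void example_xdp_flush(struct net_device *dev,
			      struct xdp_frame **frames, int n)
{
	int i, sent;

	/* The driver consumes frames[0..sent-1]; everything past that
	 * is still owned by this caller and must be returned here.
	 */
	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
	if (sent < 0)
		sent = 0;

	for (i = sent; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);
}
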
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index ee7a906e30b3..d029b69c3f18 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -29,3 +29,11 @@ config FSL_DPAA2_PTP_CLOCK
help
This driver adds support for using the DPAA2 1588 timer module
as a PTP clock.
+
+config FSL_DPAA2_SWITCH
+ tristate "Freescale DPAA2 Ethernet Switch"
+ depends on BRIDGE || BRIDGE=n
+ depends on NET_SWITCHDEV
+ help
+ Driver for Freescale DPAA2 Ethernet Switch. This driver manages
+ switch objects discovered on the Freescale MC bus.
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 146cb3540e61..c2ef74052ef8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -5,11 +5,13 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
+obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
+fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o
# Needed by the tracing framework
CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 492943bb9c48..e0c3c58e2ac7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -223,31 +223,31 @@ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
}
}
-static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
- ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
- if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+ ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
+ if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
return;
while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
- ch->xdp.drop_bufs,
- ch->xdp.drop_cnt)) == -EBUSY) {
+ ch->recycled_bufs,
+ ch->recycled_bufs_cnt)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
break;
cpu_relax();
}
if (err) {
- dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
- ch->buf_count -= ch->xdp.drop_cnt;
+ dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+ ch->buf_count -= ch->recycled_bufs_cnt;
}
- ch->xdp.drop_cnt = 0;
+ ch->recycled_bufs_cnt = 0;
}
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
@@ -300,7 +300,7 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
ch->stats.xdp_tx++;
}
for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
- dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
percpu_stats->tx_errors++;
ch->stats.xdp_tx_err++;
}
@@ -382,7 +382,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_DROP:
- dpaa2_eth_xdp_release_buf(priv, ch, addr);
+ dpaa2_eth_recycle_buf(priv, ch, addr);
ch->stats.xdp_drop++;
break;
case XDP_REDIRECT:
@@ -403,7 +403,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
free_pages((unsigned long)vaddr, 0);
} else {
ch->buf_count++;
- dpaa2_eth_xdp_release_buf(priv, ch, addr);
+ dpaa2_eth_recycle_buf(priv, ch, addr);
}
ch->stats.xdp_drop++;
} else {
@@ -418,6 +418,35 @@ out:
return xdp_act;
}
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ struct dpaa2_eth_priv *priv = ch->priv;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct sk_buff *skb = NULL;
+ unsigned int skb_len;
+
+ if (fd_length > priv->rx_copybreak)
+ return NULL;
+
+ skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
+
+ skb = napi_alloc_skb(&ch->napi, skb_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
+ skb_put(skb, fd_length);
+
+ memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
+
+ return skb;
+}
+
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
@@ -459,9 +488,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
}
- dma_unmap_page(dev, addr, priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ skb = dpaa2_eth_copybreak(ch, fd, vaddr);
+ if (!skb) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ }
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
@@ -2431,8 +2463,6 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
percpu_stats->tx_packets += enqueued;
for (i = 0; i < enqueued; i++)
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
- for (i = enqueued; i < n; i++)
- xdp_return_frame_rx_napi(frames[i]);
return enqueued;
}
@@ -4304,6 +4334,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
skb_queue_head_init(&priv->tx_skbs);
+ priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
+
/* Obtain a MC portal */
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 9b6a89709ce1..cdb623d5f2c1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -438,8 +438,6 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_ch_xdp {
struct bpf_prog *prog;
- u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
- int drop_cnt;
unsigned int res;
};
@@ -457,6 +455,10 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_xdp xdp;
struct xdp_rxq_info xdp_rxq;
struct list_head *rx_list;
+
+ /* Buffers to be recycled back in the buffer pool */
+ u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
+ int recycled_bufs_cnt;
};
struct dpaa2_eth_dist_fields {
@@ -487,6 +489,8 @@ struct dpaa2_eth_trap_data {
struct dpaa2_eth_priv *priv;
};
+#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -567,6 +571,8 @@ struct dpaa2_eth_priv {
struct devlink *devlink;
struct dpaa2_eth_trap_data *trap_data;
struct devlink_port devlink_port;
+
+ u32 rx_copybreak;
};
struct dpaa2_eth_devlink_priv {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index bf59708b869e..ad5e374eeccf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -782,6 +782,44 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev,
return 0;
}
+static int dpaa2_eth_get_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_eth_set_tunable(struct net_device *net_dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.nway_reset = dpaa2_eth_nway_reset,
@@ -796,4 +834,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
.get_ts_info = dpaa2_eth_get_ts_info,
+ .get_tunable = dpaa2_eth_get_tunable,
+ .set_tunable = dpaa2_eth_set_tunable,
};
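
The get_tunable/set_tunable hooks above plug the new rx_copybreak field into the generic ethtool tunable interface, which userspace typically drives with "ethtool --get-tunable/--set-tunable <iface> rx-copybreak <bytes>". A rough illustration of the descriptor the ethtool core fills in before calling these hooks, assuming the generic ETHTOOL_RX_COPYBREAK tunable; the struct comes from the uapi ethtool header, not from this patch:

#include <linux/ethtool.h>

/* Illustrative only: the core validates this descriptor and then hands
 * the driver the u32 payload, which dpaa2_eth_set_tunable() stores in
 * priv->rx_copybreak.
 */
static const struct ethtool_tunable example_rx_copybreak_req = {
	.cmd	 = ETHTOOL_STUNABLE,
	.id	 = ETHTOOL_RX_COPYBREAK,
	.type_id = ETHTOOL_TUNABLE_U32,
	.len	 = sizeof(u32),
};
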
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
new file mode 100644
index 000000000000..70e04321c420
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch ethtool support
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ *
+ */
+
+#include <linux/ethtool.h>
+
+#include "dpaa2-switch.h"
+
+static struct {
+ enum dpsw_counter id;
+ char name[ETH_GSTRING_LEN];
+} dpaa2_switch_ethtool_counters[] = {
+ {DPSW_CNT_ING_FRAME, "rx frames"},
+ {DPSW_CNT_ING_BYTE, "rx bytes"},
+ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
+ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
+ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
+ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
+ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
+ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
+ {DPSW_CNT_EGR_FRAME, "tx frames"},
+ {DPSW_CNT_EGR_BYTE, "tx bytes"},
+ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
+ {DPSW_CNT_ING_NO_BUFF_DISCARD, "rx discarded no buffer frames"},
+};
+
+#define DPAA2_SWITCH_NUM_COUNTERS ARRAY_SIZE(dpaa2_switch_ethtool_counters)
+
+static void dpaa2_switch_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u16 version_major, version_minor;
+ int err;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
+ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err)
+ strscpy(drvinfo->fw_version, "N/A",
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", version_major, version_minor);
+
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int
+dpaa2_switch_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state = {0};
+ int err = 0;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &state);
+ if (err) {
+ netdev_err(netdev, "ERROR %d getting link state\n", err);
+ goto out;
+ }
+
+ /* At the moment, we have no way of interrogating the DPMAC
+ * from the DPSW side or there may not exist a DPMAC at all.
+ * Report only autoneg state, duplexity and speed.
+ */
+ if (state.options & DPSW_LINK_OPT_AUTONEG)
+ link_ksettings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
+ link_ksettings->base.duplex = DUPLEX_FULL;
+ link_ksettings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
+static int
+dpaa2_switch_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_link_cfg cfg = {0};
+ bool if_running;
+ int err = 0, ret;
+
+ /* Interface needs to be down to change link settings */
+ if_running = netif_running(netdev);
+ if (if_running) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ cfg.rate = link_ksettings->base.speed;
+ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
+ if (link_ksettings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
+
+ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &cfg);
+
+ if (if_running) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
+ return err;
+}
+
+static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return DPAA2_SWITCH_NUM_COUNTERS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ dpaa2_switch_ethtool_counters[i].name,
+ ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int i, err;
+
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ dpaa2_switch_ethtool_counters[i].id,
+ &data[i]);
+ if (err)
+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
+ dpaa2_switch_ethtool_counters[i].name, err);
+ }
+}
+
+const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
+ .get_drvinfo = dpaa2_switch_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = dpaa2_switch_get_link_ksettings,
+ .set_link_ksettings = dpaa2_switch_set_link_ksettings,
+ .get_strings = dpaa2_switch_ethtool_get_strings,
+ .get_ethtool_stats = dpaa2_switch_ethtool_get_stats,
+ .get_sset_count = dpaa2_switch_ethtool_get_sset_count,
+};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
new file mode 100644
index 000000000000..f9451ec5f2cb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch flower support
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include "dpaa2-switch.h"
+
+static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
+ struct dpsw_acl_key *acl_key)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dpsw_acl_fields *acl_h, *acl_m;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h = &acl_key->match;
+ acl_m = &acl_key->mask;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ acl_h->l3_protocol = match.key->ip_proto;
+ acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
+ acl_m->l3_protocol = match.mask->ip_proto;
+ acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
+ ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
+ ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
+ ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ acl_h->l2_vlan_id = match.key->vlan_id;
+ acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
+ acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
+ match.key->vlan_dei;
+
+ acl_m->l2_vlan_id = match.mask->vlan_id;
+ acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
+ acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
+ match.mask->vlan_dei;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ acl_h->l3_source_ip = be32_to_cpu(match.key->src);
+ acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
+ acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
+ acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ acl_h->l4_source_port = be16_to_cpu(match.key->src);
+ acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
+ acl_m->l4_source_port = be16_to_cpu(match.mask->src);
+ acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (match.mask->ttl != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if ((match.mask->tos & 0x3) != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on ECN not supported, only DSCP");
+ return -EOPNOTSUPP;
+ }
+
+ acl_h->l3_dscp = match.key->tos >> 2;
+ acl_m->l3_dscp = match.mask->tos >> 2;
+ }
+
+ return 0;
+}
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+ dev_err(dev, "DMA mapping failed\n");
+ err = -EFAULT;
+ goto free_cmd_buff;
+ }
+
+ err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, acl_entry_cfg);
+
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+ if (err)
+ dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+
+free_cmd_buff:
+ kfree(cmd_buff);
+
+ return err;
+}
+
+static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
+ struct dpsw_acl_key *acl_key = &entry->key;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct device *dev = ethsw->dev;
+ u8 *cmd_buff;
+ int err;
+
+ cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
+ if (!cmd_buff)
+ return -ENOMEM;
+
+ dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);
+
+ acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
+ dev_err(dev, "DMA mapping failed\n");
+ err = -EFAULT;
+ goto free_cmd_buff;
+ }
+
+ err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, acl_entry_cfg);
+
+ dma_unmap_single(dev, acl_entry_cfg->key_iova,
+ DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
+ if (err)
+ dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+
+free_cmd_buff:
+ kfree(cmd_buff);
+
+ return err;
+}
+
+static int
+dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ struct list_head *pos, *n;
+ int index = 0;
+
+ if (list_empty(&acl_tbl->entries)) {
+ list_add(&entry->list, &acl_tbl->entries);
+ return index;
+ }
+
+ list_for_each_safe(pos, n, &acl_tbl->entries) {
+ tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
+ if (entry->prio < tmp->prio)
+ break;
+ index++;
+ }
+ list_add(&entry->list, pos->prev);
+ return index;
+}
+
+static struct dpaa2_switch_acl_entry*
+dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+ int index)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &acl_tbl->entries, list) {
+ if (i == index)
+ return tmp;
+ ++i;
+ }
+
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry,
+ int precedence)
+{
+ int err;
+
+ err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ if (err)
+ return err;
+
+ entry->cfg.precedence = precedence;
+ return dpaa2_switch_acl_entry_add(acl_tbl, entry);
+}
+
+static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ /* Add the new ACL entry to the linked list and get its index */
+ index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry);
+
+ /* Move up in priority the ACL entries to make space
+ * for the new filter.
+ */
+ precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1;
+ for (i = 0; i < index; i++) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+
+ err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence++;
+ }
+
+ /* Add the new entry to hardware */
+ entry->cfg.precedence = precedence;
+ err = dpaa2_switch_acl_entry_add(acl_tbl, entry);
+ acl_tbl->num_rules++;
+
+ return err;
+}
+
+static struct dpaa2_switch_acl_entry *
+dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
+ unsigned long cookie)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+ return NULL;
+}
+
+static int
+dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp, *n;
+ int index = 0;
+
+ list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
+ if (tmp->cookie == entry->cookie)
+ return index;
+ index++;
+ }
+ return -ENOENT;
+}
+
+static int
+dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry)
+{
+ struct dpaa2_switch_acl_entry *tmp;
+ int index, i, precedence, err;
+
+ index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry);
+
+ /* Remove from hardware the ACL entry */
+ err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
+ if (err)
+ return err;
+
+ acl_tbl->num_rules--;
+
+ /* Remove it from the list also */
+ list_del(&entry->list);
+
+ /* Move down in priority the entries over the deleted one */
+ precedence = entry->cfg.precedence;
+ for (i = index - 1; i >= 0; i--) {
+ tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
+ err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
+ precedence);
+ if (err)
+ return err;
+
+ precedence--;
+ }
+
+ kfree(entry);
+
+ return 0;
+}
+
+static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
+ struct flow_action_entry *cls_act,
+ struct dpsw_acl_result *dpsw_act,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (cls_act->id) {
+ case FLOW_ACTION_TRAP:
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not a DPAA2 switch port");
+ return -EOPNOTSUPP;
+ }
+
+ dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
+ dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
+ break;
+ case FLOW_ACTION_DROP:
+ dpsw_act->action = DPSW_ACL_ACTION_DROP;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (!flow_offload_has_one_action(&rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
+ if (err)
+ goto free_acl_entry;
+
+ act = &rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls)
+{
+ struct dpaa2_switch_acl_entry *entry;
+
+ entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
+ if (!entry)
+ return 0;
+
+ return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+}
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct ethsw_core *ethsw = acl_tbl->ethsw;
+ struct dpaa2_switch_acl_entry *acl_entry;
+ struct flow_action_entry *act;
+ int err;
+
+ if (!flow_offload_has_one_action(&cls->rule->action)) {
+ NL_SET_ERR_MSG(extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
+ NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
+ return -ENOMEM;
+ }
+
+ acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
+ if (!acl_entry)
+ return -ENOMEM;
+
+ act = &cls->rule->action.entries[0];
+ err = dpaa2_switch_tc_parse_action(ethsw, act,
+ &acl_entry->cfg.result, extack);
+ if (err)
+ goto free_acl_entry;
+
+ acl_entry->prio = cls->common.prio;
+ acl_entry->cookie = cls->cookie;
+
+ err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
+ if (err)
+ goto free_acl_entry;
+
+ return 0;
+
+free_acl_entry:
+ kfree(acl_entry);
+
+ return err;
+}
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct dpaa2_switch_acl_entry *entry;
+
+ entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
+ if (!entry)
+ return 0;
+
+ return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
+}
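
The cls_flower/matchall replace and destroy entry points above are meant to be driven from a tc flow block callback that the switch ports register; that registration lives in dpaa2-switch.c and is not part of the hunks quoted here. A hedged sketch of the dispatch these helpers assume, with a hypothetical callback name and wiring:

#include <net/pkt_cls.h>
#include <net/flow_offload.h>

#include "dpaa2-switch.h"

static int dpaa2_switch_example_block_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct dpaa2_switch_acl_tbl *acl_tbl = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER: {
		struct flow_cls_offload *cls = type_data;

		if (cls->command == FLOW_CLS_REPLACE)
			return dpaa2_switch_cls_flower_replace(acl_tbl, cls);
		if (cls->command == FLOW_CLS_DESTROY)
			return dpaa2_switch_cls_flower_destroy(acl_tbl, cls);
		return -EOPNOTSUPP;
	}
	case TC_SETUP_CLSMATCHALL: {
		struct tc_cls_matchall_offload *cls = type_data;

		if (cls->command == TC_CLSMATCHALL_REPLACE)
			return dpaa2_switch_cls_matchall_replace(acl_tbl, cls);
		if (cls->command == TC_CLSMATCHALL_DESTROY)
			return dpaa2_switch_cls_matchall_destroy(acl_tbl, cls);
		return -EOPNOTSUPP;
	}
	default:
		return -EOPNOTSUPP;
	}
}
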
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
new file mode 100644
index 000000000000..05de37c3b64c
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -0,0 +1,3394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DPAA2 Ethernet Switch driver
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/iommu.h>
+#include <net/pkt_cls.h>
+
+#include <linux/fsl/mc.h>
+
+#include "dpaa2-switch.h"
+
+/* Minimal supported DPSW version */
+#define DPSW_MIN_VER_MAJOR 8
+#define DPSW_MIN_VER_MINOR 9
+
+#define DEFAULT_VLAN_ID 1
+
+static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
+{
+ return port_priv->fdb->fdb_id;
+}
+
+static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->fdbs[i].in_use)
+ return &ethsw->fdbs[i];
+ return NULL;
+}
+
+static struct dpaa2_switch_acl_tbl *
+dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (!ethsw->acls[i].in_use)
+ return &ethsw->acls[i];
+ return NULL;
+}
+
+static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
+ struct net_device *bridge_dev)
+{
+ struct ethsw_port_priv *other_port_priv = NULL;
+ struct dpaa2_switch_fdb *fdb;
+ struct net_device *other_dev;
+ struct list_head *iter;
+
+ /* If we leave a bridge (bridge_dev is NULL), find an unused
+ * FDB and use that.
+ */
+ if (!bridge_dev) {
+ fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);
+
+ /* If there is no unused FDB, we must be the last port that
+ * leaves the last bridge, all the others are standalone. We
+ * can just keep the FDB that we already have.
+ */
+
+ if (!fdb) {
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+ port_priv->fdb = fdb;
+ port_priv->fdb->in_use = true;
+ port_priv->fdb->bridge_dev = NULL;
+ return 0;
+ }
+
+ /* The below call to netdev_for_each_lower_dev() demands the RTNL lock
+ * being held. Assert on it so that it's easier to catch new code
+ * paths that reach this point without the RTNL lock.
+ */
+ ASSERT_RTNL();
+
+ /* If part of a bridge, use the FDB of the first dpaa2 switch interface
+ * to be present in that bridge
+ */
+ netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ if (other_dev == port_priv->netdev)
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ break;
+ }
+
+ /* The current port is about to change its FDB to the one used by the
+ * first port that joined the bridge.
+ */
+ if (other_port_priv) {
+ /* The previous FDB is about to become unused, since the
+ * interface is no longer standalone.
+ */
+ port_priv->fdb->in_use = false;
+ port_priv->fdb->bridge_dev = NULL;
+
+ /* Get a reference to the new FDB */
+ port_priv->fdb = other_port_priv->fdb;
+ }
+
+ /* Keep track of the new upper bridge device */
+ port_priv->fdb->bridge_dev = bridge_dev;
+
+ return 0;
+}
+
+static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
+ enum dpsw_flood_type type,
+ struct dpsw_egress_flood_cfg *cfg)
+{
+ int i = 0, j;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ /* Add all the DPAA2 switch ports found in the same bridging domain to
+ * the egress flooding domain
+ */
+ for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
+ if (!ethsw->ports[j])
+ continue;
+ if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
+ continue;
+
+ if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
+ cfg->if_id[i++] = ethsw->ports[j]->idx;
+ }
+
+ /* Add the CTRL interface to the egress flooding domain */
+ cfg->if_id[i++] = ethsw->sw_attr.num_ifs;
+
+ cfg->fdb_id = fdb_id;
+ cfg->flood_type = type;
+ cfg->num_ifs = i;
+}
+
+static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
+{
+ struct dpsw_egress_flood_cfg flood_cfg;
+ int err;
+
+ /* Setup broadcast flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+ /* Setup unknown flooding domain */
+ dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
+ err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &flood_cfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_vlan_cfg vcfg = {0};
+ int err;
+
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_vlan_add(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
+}
+
+static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
+{
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_link_state state;
+ int err;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return true;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ return state.up ? true : false;
+}
+
+static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_tci_cfg tci_cfg = { 0 };
+ bool up;
+ int err, ret;
+
+ err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
+ return err;
+ }
+
+ tci_cfg.vlan_id = pvid;
+
+ /* Interface needs to be down to change PVID */
+ up = dpaa2_switch_port_is_up(port_priv);
+ if (up) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
+ goto set_tci_error;
+ }
+
+ /* Delete previous PVID info and mark the new one */
+ port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
+ port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
+ port_priv->pvid = pvid;
+
+set_tci_error:
+ if (up) {
+ ret = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (ret) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
+ return ret;
+ }
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
+ u16 vid, u16 flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ int err;
+
+ if (port_priv->vlans[vid]) {
+ netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ return -EEXIST;
+ }
+
+ /* If hit, this VLAN rule will lead the packet into the FDB table
+ * specified in the vlan configuration below
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
+ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
+ return err;
+ }
+
+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
+ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_add_if_untagged err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ err = dpaa2_switch_port_set_pvid(port_priv, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
+{
+ switch (state) {
+ case BR_STATE_DISABLED:
+ return DPSW_STP_STATE_DISABLED;
+ case BR_STATE_LISTENING:
+ return DPSW_STP_STATE_LISTENING;
+ case BR_STATE_LEARNING:
+ return DPSW_STP_STATE_LEARNING;
+ case BR_STATE_FORWARDING:
+ return DPSW_STP_STATE_FORWARDING;
+ case BR_STATE_BLOCKING:
+ return DPSW_STP_STATE_BLOCKING;
+ default:
+ return DPSW_STP_STATE_DISABLED;
+ }
+}
+
+static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
+{
+ struct dpsw_stp_cfg stp_cfg = {0};
+ int err;
+ u16 vid;
+
+ if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
+ return 0; /* Nothing to do */
+
+ stp_cfg.state = br_stp_state_to_dpsw(state);
+ for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ stp_cfg.vlan_id = vid;
+ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &stp_cfg);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "dpsw_if_set_stp err %d\n", err);
+ return err;
+ }
+ }
+ }
+
+ port_priv->stp_state = state;
+
+ return 0;
+}
+
+static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
+{
+ struct ethsw_port_priv *ppriv_local = NULL;
+ int i, err;
+
+ if (!ethsw->vlans[vid])
+ return -ENOENT;
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = 0;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ ppriv_local = ethsw->ports[i];
+ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_add_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ /* Silently discard error for calling multiple times the del command */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_unicast err %d\n", err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ /* Silently discard error for calling multiple times the add command */
+ if (err && err != -ENXIO)
+ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
+ err);
+ return err;
+}
+
+static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ u16 fdb_id;
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ fdb_id, &entry);
+ /* Silently discard error for calling multiple times the del command */
+ if (err && err != -ENAVAIL)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_multicast err %d\n", err);
+ return err;
+}
+
+static void dpaa2_switch_port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u64 tmp;
+ int err;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME_DISCARD,
+ &stats->rx_dropped);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FLTR_FRAME,
+ &tmp);
+ if (err)
+ goto error;
+ stats->rx_dropped += tmp;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME_DISCARD,
+ &stats->tx_dropped);
+ if (err)
+ goto error;
+
+ return;
+
+error:
+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
+static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
+ int attr_id)
+{
+ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
+}
+
+static int dpaa2_switch_port_get_offload_stats(int attr_id,
+ const struct net_device *netdev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
+ 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ (u16)ETHSW_L2_MAX_FRM(mtu));
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_if_set_max_frame_length() err %d\n", err);
+ return err;
+ }
+
+ netdev->mtu = mtu;
+ return 0;
+}
+
+static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state;
+ int err;
+
+ /* Interrupts are received even though no one issued an 'ifconfig up'
+ * on the switch interface. Ignore these link state update interrupts
+ */
+ if (!netif_running(netdev))
+ return 0;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ if (state.up != port_priv->link_state) {
+ if (state.up) {
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ port_priv->link_state = state.up;
+ }
+
+ return 0;
+}
+
+/* Manage all NAPI instances for the control interface.
+ *
+ * We only have one RX queue and one Tx Conf queue for all
+ * switch ports. Therefore, we only need to enable the NAPI instance once, the
+ * first time one of the switch ports runs .dev_open().
+ */
+
+static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* a new interface is using the NAPI instance */
+ ethsw->napi_users++;
+
+ /* if there is already a user of the instance, return */
+ if (ethsw->napi_users > 1)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_enable(&ethsw->fq[i].napi);
+}
+
+static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
+{
+ int i;
+
+ /* Access to the ethsw->napi_users relies on the RTNL lock */
+ ASSERT_RTNL();
+
+ /* If we are not the last interface using the NAPI, return */
+ ethsw->napi_users--;
+ if (ethsw->napi_users)
+ return;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ napi_disable(&ethsw->fq[i].napi);
+}
+
+static int dpaa2_switch_port_open(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ /* Explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+ * to report the LOWER_UP flag, even though the link
+ * notification wasn't even received.
+ */
+ netif_carrier_off(netdev);
+
+ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+
+ /* sync carrier state */
+ err = dpaa2_switch_port_carrier_state_sync(netdev);
+ if (err) {
+ netdev_err(netdev,
+ "dpaa2_switch_port_carrier_state_sync err %d\n", err);
+ goto err_carrier_sync;
+ }
+
+ dpaa2_switch_enable_ctrl_if_napi(ethsw);
+
+ return 0;
+
+err_carrier_sync:
+ dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ return err;
+}
+
+static int dpaa2_switch_port_stop(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+
+ dpaa2_switch_disable_ctrl_if_napi(ethsw);
+
+ return 0;
+}
+
+static int dpaa2_switch_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+
+ ppid->id_len = 1;
+ ppid->id[0] = port_priv->ethsw_data->dev_id;
+
+ return 0;
+}
+
+static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
+ size_t len)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->idx);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct ethsw_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
+static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
+ struct ethsw_dump_ctx *dump)
+{
+ int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
+ struct ethsw_port_priv *port_priv)
+{
+ int idx = port_priv->idx;
+ int valid;
+
+ if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ valid = entry->if_info == port_priv->idx;
+ else
+ valid = entry->if_mask[idx / 8] & BIT(idx % 8);
+
+ return valid;
+}
+
+static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
+ dpaa2_switch_fdb_cb_t cb, void *data)
+{
+ struct net_device *net_dev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct device *dev = net_dev->dev.parent;
+ struct fdb_dump_entry *fdb_entries;
+ struct fdb_dump_entry fdb_entry;
+ dma_addr_t fdb_dump_iova;
+ u16 num_fdb_entries;
+ u32 fdb_dump_size;
+ int err = 0, i;
+ u8 *dma_mem;
+ u16 fdb_id;
+
+ fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
+ dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, fdb_dump_iova)) {
+ netdev_err(net_dev, "dma_map_single() failed\n");
+ err = -ENOMEM;
+ goto err_map;
+ }
+
+ fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
+ err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
+ fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
+ if (err) {
+ netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
+ goto err_dump;
+ }
+
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
+
+ fdb_entries = (struct fdb_dump_entry *)dma_mem;
+ for (i = 0; i < num_fdb_entries; i++) {
+ fdb_entry = fdb_entries[i];
+
+ err = cb(port_priv, &fdb_entry, data);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(dma_mem);
+
+ return 0;
+
+err_dump:
+ dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE);
+err_map:
+ kfree(dma_mem);
+ return err;
+}
+
+static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
+}
+
+static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *net_dev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_dump_ctx dump = {
+ .dev = net_dev,
+ .skb = skb,
+ .cb = cb,
+ .idx = *idx,
+ };
+ int err;
+
+ err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
+ *idx = dump.idx;
+
+ return err;
+}
+
+static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data __always_unused)
+{
+ if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
+ return 0;
+
+ if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
+ return 0;
+
+ if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
+ dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);
+
+ return 0;
+}
+
+static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
+{
+ dpaa2_switch_fdb_iterate(port_priv,
+ dpaa2_switch_fdb_entry_fast_age, NULL);
+}
+
+static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_add(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = vid,
+ .obj.orig_dev = netdev,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+
+ return dpaa2_switch_port_vlans_del(netdev, &vlan);
+}
+
+static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *net_dev = port_priv->netdev;
+ struct device *dev = net_dev->dev.parent;
+ u8 mac_addr[ETH_ALEN];
+ int err;
+
+ if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
+ return 0;
+
+ /* Get firmware address, if any */
+ err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, mac_addr);
+ if (err) {
+ dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else {
+ /* No MAC address configured, fill in net_dev->dev_addr
+ * with a random one
+ */
+ eth_hw_addr_random(net_dev);
+ dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
+
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+ * at least until the next reboot. This move will also permit
+ * register_netdevice() to properly fill up net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ unsigned char *buffer_start;
+ struct sk_buff **skbh, *skb;
+ dma_addr_t fd_addr;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);
+
+ skb = *skbh;
+ buffer_start = (unsigned char *)skbh;
+
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+}
+
+static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = ethsw->dev;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+ u8 *buff_start;
+ void *hwa;
+
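+	/* Move the buffer start back from skb->data to make room for the
+	 * software and hardware annotation areas, aligned to
+	 * DPAA2_SWITCH_TX_BUF_ALIGN
+	 */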
+ buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
+ DPAA2_SWITCH_TX_BUF_ALIGN,
+ DPAA2_SWITCH_TX_BUF_ALIGN);
+
+ /* Clear FAS to have consistent values for TX confirmation. It is
+ * located in the first 8 bytes of the buffer's hardware annotation
+ * area
+ */
+ hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
+ memset(hwa, 0, 8);
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ skbh = (struct sk_buff **)buff_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buff_start,
+ skb_tail_pointer(skb) - buff_start,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ /* Setup the FD fields */
+ memset(fd, 0, sizeof(*fd));
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+
+ return 0;
+}
+
+static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
+ struct dpaa2_fd fd;
+ int err;
+
+ if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
+ struct sk_buff *ns;
+
+ ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
+ if (unlikely(!ns)) {
+ net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
+ goto err_free_skb;
+ }
+ dev_consume_skb_any(skb);
+ skb = ns;
+ }
+
+ /* We'll be holding a back-reference to the skb until Tx confirmation */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
+ goto err_exit;
+ }
+
+ /* At this stage, we do not support non-linear skbs so just try to
+ * linearize the skb and if that's not working, just drop the packet.
+ */
+ err = skb_linearize(skb);
+ if (err) {
+ net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
+ err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
+ if (unlikely(err)) {
+		net_err_ratelimited("%s: dpaa2_switch_build_single_fd() returned %d\n", net_dev->name, err);
+ goto err_free_skb;
+ }
+
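+	/* Retry the enqueue while the software portal is busy, up to
+	 * DPAA2_SWITCH_SWP_BUSY_RETRIES times
+	 */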
+ do {
+ err = dpaa2_io_service_enqueue_qd(NULL,
+ port_priv->tx_qdid,
+ 8, 0, &fd);
+ retries--;
+ } while (err == -EBUSY && retries);
+
+ if (unlikely(err < 0)) {
+ dpaa2_switch_free_fd(ethsw, &fd);
+ goto err_exit;
+ }
+
+ return NETDEV_TX_OK;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+err_exit:
+ return NETDEV_TX_OK;
+}
+
+static int
+dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return dpaa2_switch_cls_flower_replace(acl_tbl, f);
+ case FLOW_CLS_DESTROY:
+ return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *f)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
+ case TC_CLSMATCHALL_DESTROY:
+ return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
+{
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(dpaa2_switch_block_cb_list);
+
+static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->acl_tbl)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, &acl_if_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
+ return err;
+ }
+
+ acl_tbl->ports |= BIT(port_priv->idx);
+ port_priv->acl_tbl = acl_tbl;
+
+ return 0;
+}
+
+static int
+dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_acl_if_cfg acl_if_cfg;
+ int err;
+
+ if (port_priv->acl_tbl != acl_tbl)
+ return -EINVAL;
+
+ acl_if_cfg.if_id[0] = port_priv->idx;
+ acl_if_cfg.num_ifs = 1;
+ err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ acl_tbl->id, &acl_if_cfg);
+ if (err) {
+		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
+ return err;
+ }
+
+ acl_tbl->ports &= ~BIT(port_priv->idx);
+ port_priv->acl_tbl = NULL;
+ return 0;
+}
+
+static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
+ int err;
+
+ /* If the port is already bound to this ACL table then do nothing. This
+ * can happen when this port is the first one to join a tc block
+ */
+ if (port_priv->acl_tbl == acl_tbl)
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
+ if (err)
+ return err;
+
+ /* Mark the previous ACL table as being unused if this was the last
+ * port that was using it.
+ */
+ if (old_acl_tbl->ports == 0)
+ old_acl_tbl->in_use = false;
+
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+}
+
+static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
+ struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *new_acl_tbl;
+ int err;
+
+ /* We are the last port that leaves a block (an ACL table).
+ * We'll continue to use this table.
+ */
+ if (acl_tbl->ports == BIT(port_priv->idx))
+ return 0;
+
+ err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
+ if (err)
+ return err;
+
+ if (acl_tbl->ports == 0)
+ acl_tbl->in_use = false;
+
+ new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
+ new_acl_tbl->in_use = true;
+ return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
+}
+
+static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct flow_block_cb *block_cb;
+ bool register_block = false;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+
+ if (!block_cb) {
+		/* If we don't already have a block cb registered for this flow
+		 * block, then this port must be the first to join it. In this
+		 * case, we can just continue to use our private ACL table
+		 */
+ acl_tbl = port_priv->acl_tbl;
+
+ block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw, acl_tbl, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ register_block = true;
+ } else {
+ acl_tbl = flow_block_cb_priv(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
+ if (err)
+ goto err_block_bind;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list,
+ &dpaa2_switch_block_cb_list);
+ }
+
+ return 0;
+
+err_block_bind:
+ if (!flow_block_cb_decref(block_cb))
+ flow_block_cb_free(block_cb);
+ return err;
+}
+
+static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ dpaa2_switch_port_setup_tc_block_cb_ig,
+ ethsw);
+ if (!block_cb)
+ return;
+
+ acl_tbl = flow_block_cb_priv(block_cb);
+ err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
+ if (!err && !flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+}
+
+static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
+ struct flow_block_offload *f)
+{
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &dpaa2_switch_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return dpaa2_switch_setup_tc_block_bind(netdev, f);
+ case FLOW_BLOCK_UNBIND:
+ dpaa2_switch_setup_tc_block_unbind(netdev, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+	case TC_SETUP_BLOCK:
+		return dpaa2_switch_setup_tc_block(netdev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct net_device_ops dpaa2_switch_port_ops = {
+ .ndo_open = dpaa2_switch_port_open,
+ .ndo_stop = dpaa2_switch_port_stop,
+
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = dpaa2_switch_port_get_stats,
+ .ndo_change_mtu = dpaa2_switch_port_change_mtu,
+ .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
+ .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
+ .ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
+ .ndo_vlan_rx_add_vid = dpaa2_switch_port_vlan_add,
+ .ndo_vlan_rx_kill_vid = dpaa2_switch_port_vlan_kill,
+
+ .ndo_start_xmit = dpaa2_switch_port_tx,
+ .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
+ .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
+ .ndo_setup_tc = dpaa2_switch_port_setup_tc,
+};
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
+{
+ return netdev->netdev_ops == &dpaa2_switch_port_ops;
+}
+
+static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
+ dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
+ }
+}
+
+static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+
+ /* Mask the events and the if_id reserved bits to be cleared on read */
+ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
+ int err;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+ if (err) {
+ dev_err(dev, "Can't get irq status (err %d)\n", err);
+
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+ goto out;
+ }
+
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
+ dpaa2_switch_links_state_update(ethsw);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
+ struct fsl_mc_device_irq *irq;
+ int err;
+
+ err = fsl_mc_allocate_irqs(sw_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
+ err = -EINVAL;
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+ goto free_irq;
+ }
+
+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
+ NULL,
+ dpaa2_switch_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (err) {
+ dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, mask);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 1);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
+ goto free_devm_irq;
+ }
+
+ return 0;
+
+free_devm_irq:
+ devm_free_irq(dev, irq->msi_desc->irq, dev);
+free_irq:
+ fsl_mc_free_irqs(sw_dev);
+ return err;
+}
+
+static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err)
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+
+ fsl_mc_free_irqs(sw_dev);
+}
+
+static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ enum dpsw_learning_mode learn_mode;
+ int err;
+
+ if (enable)
+ learn_mode = DPSW_LEARNING_MODE_HW;
+ else
+ learn_mode = DPSW_LEARNING_MODE_DIS;
+
+ err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, learn_mode);
+ if (err)
+ netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
+
+ if (!enable)
+ dpaa2_switch_port_fast_age(port_priv);
+
+ return err;
+}
+
+static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
+ u8 state)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpaa2_switch_port_set_stp_state(port_priv, state);
+ if (err)
+ return err;
+
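+	/* Address learning is only meaningful while the port is learning or
+	 * forwarding; keep it disabled in all other STP states
+	 */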
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_FORWARDING:
+ err = dpaa2_switch_port_set_learning(port_priv,
+ port_priv->learn_ena);
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
+ struct switchdev_brport_flags flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);
+
+ if (flags.mask & BR_FLOOD)
+ port_priv->ucast_flood = !!(flags.val & BR_FLOOD);
+
+ return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+}
+
+static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
+ BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ if (err)
+ return err;
+ port_priv->learn_ena = learn_ena;
+ }
+
+ if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
+ err = dpaa2_switch_port_flood(port_priv, flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_attr_set(struct net_device *netdev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = dpaa2_switch_port_attr_stp_state_set(netdev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ if (!attr->u.vlan_filtering) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The DPAA2 switch does not support VLAN-unaware operation");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_attr *attr = &ethsw->sw_attr;
+ int err = 0;
+
+ /* Make sure that the VLAN is not already configured
+ * on the switch port
+ */
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ return -EEXIST;
+
+ /* Check if there is space for a new VLAN */
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
+ return err;
+ }
+ if (attr->max_vlans - attr->num_vlans < 1)
+ return -ENOSPC;
+
+ if (!port_priv->ethsw_data->vlans[vlan->vid]) {
+ /* this is a new VLAN */
+ err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
+ if (err)
+ return err;
+
+ port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
+ }
+
+ return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
+}
+
+static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
+ const unsigned char *addr)
+{
+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+ list_for_each_entry(ha, &list->list, list) {
+ if (ether_addr_equal(ha->addr, addr)) {
+ netif_addr_unlock_bh(netdev);
+ return 1;
+ }
+ }
+ netif_addr_unlock_bh(netdev);
+ return 0;
+}
+
+static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ /* Check if address is already set on this port */
+ if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -EEXIST;
+
+ err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_add(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_add err %d\n", err);
+ dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_add(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_add(netdev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_add(netdev,
+ SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg;
+ int i, err;
+
+ if (!port_priv->vlans[vid])
+ return -ENOENT;
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
+ /* If we are deleting the PVID of a port, use VLAN 4095 instead
+ * as we are sure that neither the bridge nor the 8021q module
+ * will use it
+ */
+ err = dpaa2_switch_port_set_pvid(port_priv, 4095);
+ if (err)
+ return err;
+ }
+
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
+
+ /* Delete VLAN from switch if it is no longer configured on
+ * any port
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ return 0; /* Found a port member in VID */
+
+ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
+ err = dpaa2_switch_dellink(ethsw, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ if (netif_is_bridge_master(vlan->obj.orig_dev))
+ return -EOPNOTSUPP;
+
+ return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
+}
+
+static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
+ return -ENOENT;
+
+ err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_del(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_del err %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_port_obj_del(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *ptr)
+{
+ int err;
+
+ err = switchdev_handle_port_attr_set(netdev, ptr,
+ dpaa2_switch_port_dev_check,
+ dpaa2_switch_port_attr_set);
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ struct net_device *upper_dev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct ethsw_port_priv *other_port_priv;
+ struct net_device *other_dev;
+ struct list_head *iter;
+ bool learn_ena;
+ int err;
+
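+	/* Only allow bridging with switch ports that belong to the same
+	 * DPSW instance
+	 */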
+ netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
+ netdev_err(netdev,
+ "Interface from a different DPSW is in the bridge already!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Delete the previously manually installed VLAN 1 */
+ err = dpaa2_switch_port_del_vlan(port_priv, 1);
+ if (err)
+ return err;
+
+ dpaa2_switch_port_set_fdb(port_priv, upper_dev);
+
+ /* Inherit the initial bridge port learning state */
+ learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
+ err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
+ port_priv->learn_ena = learn_ena;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ goto err_egress_flood;
+
+ return 0;
+
+err_egress_flood:
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+ return err;
+}
+
+static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
+}
+
+static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
+{
+ __be16 vlan_proto = htons(ETH_P_8021Q);
+
+ if (vdev)
+ vlan_proto = vlan_dev_vlan_proto(vdev);
+
+ return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
+}
+
+static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
+	/* First of all, fast age any learnt FDB addresses on this switch port */
+ dpaa2_switch_port_fast_age(port_priv);
+
+ /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
+ * upper devices or otherwise from the FDB table that we are about to
+ * leave
+ */
+ err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);
+
+ dpaa2_switch_port_set_fdb(port_priv, NULL);
+
+ /* Restore all RX VLANs into the new FDB table that we just joined */
+ err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
+ if (err)
+ netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);
+
+ /* Reset the flooding state to denote that this port can send any
+ * packet in standalone mode. With this, we are also ensuring that any
+ * later bridge join will have the flooding flag on.
+ */
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Setup the egress flood policy (broadcast, unknown unicast).
+ * When the port is not under a bridge, only the CTRL interface is part
+ * of the flooding domain besides the actual port
+ */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Recreate the egress flood domain of the FDB that we just left */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* No HW learning when not under a bridge */
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ return err;
+ port_priv->learn_ena = false;
+
+	/* Add VLAN 1 as the PVID when not under a bridge. We need this since
+	 * the dpaa2-switch interfaces are not capable of being VLAN unaware
+ */
+ return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
+ BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
+}
+
+static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ /* RCU read lock not necessary because we have write-side protection
+ * (rtnl_mutex), however a non-rcu iterator does not exist.
+ */
+ netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
+ if (is_vlan_dev(upper_dev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+ int err = 0;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev))
+ break;
+
+ if (!br_vlan_enabled(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot join a bridge while VLAN uppers are present");
+ goto out;
+ }
+
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
+ else
+ err = dpaa2_switch_port_bridge_leave(netdev);
+ }
+ break;
+ }
+
+out:
+ return notifier_from_errno(err);
+}
+
+struct ethsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void dpaa2_switch_event_work(struct work_struct *work)
+{
+ struct ethsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct ethsw_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ int err;
+
+ rtnl_lock();
+ fdb_info = &switchdev_work->fdb_info;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
+ fdb_info->addr);
+ else
+ err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
+ fdb_info->addr);
+ if (err)
+ break;
+ fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
+ &fdb_info->info, NULL);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user || fdb_info->is_local)
+ break;
+ if (is_unicast_ether_addr(fdb_info->addr))
+ dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
+ else
+ dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
+ break;
+ }
+
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+/* Called under rcu_read_lock() */
+static int dpaa2_switch_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
+ struct ethsw_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+
+ if (!dpaa2_switch_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+
+ /* Take a reference on the device to avoid being freed. */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(ethsw->workqueue, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static int dpaa2_switch_port_obj_event(unsigned long event,
+ struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return dpaa2_switch_port_obj_event(event, dev, ptr);
+ case SWITCHDEV_PORT_ATTR_SET:
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
+ const struct dpaa2_fd *fd)
+{
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct device *dev = ethsw->dev;
+ struct sk_buff *skb = NULL;
+ void *fd_vaddr;
+
+ fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
+ dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb)) {
+ dev_err(dev, "build_skb() failed\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ ethsw->buf_count--;
+
+ return skb;
+}
+
+static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ dpaa2_switch_free_fd(fq->ethsw, fd);
+}
+
+static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
+ const struct dpaa2_fd *fd)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ struct ethsw_port_priv *port_priv;
+ struct net_device *netdev;
+ struct vlan_ethhdr *hdr;
+ struct sk_buff *skb;
+ u16 vlan_tci, vid;
+ int if_id, err;
+
+	/* The switch ingress interface ID is carried in the upper word of the
+	 * frame's FLC field
+	 */
+ if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
+
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "Frame received from unknown interface!\n");
+ goto err_free_fd;
+ }
+ port_priv = ethsw->ports[if_id];
+ netdev = port_priv->netdev;
+
+ /* build the SKB based on the FD received */
+	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
+		if (net_ratelimit())
+			netdev_err(netdev, "Received invalid frame format\n");
+		goto err_free_fd;
+	}
+
+ skb = dpaa2_switch_build_linear_skb(ethsw, fd);
+ if (unlikely(!skb))
+ goto err_free_fd;
+
+ skb_reset_mac_header(skb);
+
+ /* Remove the VLAN header if the packet that we just received has a vid
+	 * equal to the port PVID. Since the dpaa2-switch can operate only in
+ * VLAN-aware mode and no alterations are made on the packet when it's
+ * redirected/mirrored to the control interface, we are sure that there
+ * will always be a VLAN header present.
+ */
+ hdr = vlan_eth_hdr(skb);
+ vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
+ if (vid == port_priv->pvid) {
+ err = __skb_vlan_pop(skb, &vlan_tci);
+ if (err) {
+ dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
+ goto err_free_fd;
+ }
+ }
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ /* Setup the offload_fwd_mark only if the port is under a bridge */
+ skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
+
+ netif_receive_skb(skb);
+
+ return;
+
+err_free_fd:
+ dpaa2_switch_free_fd(ethsw, fd);
+}
+
+static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
+{
+ ethsw->features = 0;
+
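+	/* Retrieving the port MAC address from firmware is only supported
+	 * starting with DPSW API version 8.6
+	 */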
+ if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
+ ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
+}
+
+static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_attr ctrl_if_attr;
+ struct device *dev = ethsw->dev;
+ int i = 0;
+ int err;
+
+ err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ctrl_if_attr);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
+ return err;
+ }
+
+ ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_RX;
+
+ ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
+ ethsw->fq[i].ethsw = ethsw;
+ ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
+
+ return 0;
+}
+
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
+{
+ struct device *dev = ethsw->dev;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
+ dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ free_pages((unsigned long)vaddr, 0);
+ }
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
+{
+ struct device *dev = ethsw->dev;
+ u64 buf_array[BUFS_PER_CMD];
+ struct page *page;
+ int retries = 0;
+ dma_addr_t addr;
+ int err;
+ int i;
+
+ for (i = 0; i < BUFS_PER_CMD; i++) {
+ /* Allocate one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page) {
+ dev_err(dev, "buffer allocation failed\n");
+ goto err_alloc;
+ }
+
+ addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+			dev_err(dev, "dma_map_page() failed\n");
+ goto err_map;
+ }
+ buf_array[i] = addr;
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful or
+ * max retries hit.
+ */
+ while ((err = dpaa2_io_service_release(NULL, bpid,
+ buf_array, i)) == -EBUSY) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
+ break;
+
+ cpu_relax();
+ }
+
+ /* If release command failed, clean up and bail out. */
+ if (err) {
+ dpaa2_switch_free_bufs(ethsw, buf_array, i);
+ return 0;
+ }
+
+ return i;
+
+err_map:
+ __free_pages(page, 0);
+err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
+static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
+{
+ int *count = &ethsw->buf_count;
+ int new_count;
+ int err = 0;
+
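+	/* Only refill once the buffer count drops below the refill threshold,
+	 * then add buffers until we are back at DPAA2_ETHSW_NUM_BUFS
+	 */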
+ if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
+ do {
+ new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll
+ * try later on
+ */
+ break;
+ }
+ *count += new_count;
+ } while (*count < DPAA2_ETHSW_NUM_BUFS);
+
+ if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
+{
+ int *count, i;
+
+ for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ count = &ethsw->buf_count;
+ *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+
+ if (unlikely(*count < BUFS_PER_CMD))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
+{
+ u64 buf_array[BUFS_PER_CMD];
+ int ret;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
+ buf_array, BUFS_PER_CMD);
+ if (ret < 0) {
+ dev_err(ethsw->dev,
+ "dpaa2_io_service_acquire() = %d\n", ret);
+ return;
+ }
+ dpaa2_switch_free_bufs(ethsw, buf_array, ret);
+
+ } while (ret);
+}
+
+static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
+ struct device *dev = ethsw->dev;
+ struct fsl_mc_device *dpbp_dev;
+ struct dpbp_attr dpbp_attrs;
+ int err;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
+ return err;
+ }
+ ethsw->dpbp_dev = dpbp_dev;
+
+ err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
+ &dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_enable() failed\n");
+ goto err_enable;
+ }
+
+ err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
+ &dpbp_attrs);
+ if (err) {
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
+ dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
+ dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
+ dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
+
+ err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &dpsw_ctrl_if_pools_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+ ethsw->bpid = dpbp_attrs.id;
+
+ return 0;
+
+err_get_attr:
+ dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+err_reset:
+ dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+ fsl_mc_object_free(dpbp_dev);
+ return err;
+}
+
+static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
+{
+ dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
+ fsl_mc_object_free(ethsw->dpbp_dev);
+}
+
+static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ ethsw->fq[i].store =
+ dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
+ ethsw->dev);
+ if (!ethsw->fq[i].store) {
+ dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
+ while (--i >= 0)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_store_destroy(ethsw->fq[i].store);
+}
+
+static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
+{
+ int err, retries = 0;
+
+ /* Try to pull from the FQ while the portal is busy and we didn't hit
+	 * the maximum number of retries
+ */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ if (unlikely(err))
+ dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);
+
+ return err;
+}
+
+/* Consume all frames pull-dequeued into the store */
+static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
+{
+ struct ethsw_core *ethsw = fq->ethsw;
+ int cleaned = 0, is_last;
+ struct dpaa2_dq *dq;
+ int retries = 0;
+
+ do {
+ /* Get the next available FD from the store */
+ dq = dpaa2_io_store_next(fq->store, &is_last);
+ if (unlikely(!dq)) {
+ if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
+ dev_err_once(ethsw->dev,
+ "No valid dequeue response\n");
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ if (fq->type == DPSW_QUEUE_RX)
+ dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
+ else
+ dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
+ cleaned++;
+
+ } while (!is_last);
+
+ return cleaned;
+}
+
+/* NAPI poll routine */
+static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
+{
+ int err, cleaned = 0, store_cleaned, work_done;
+ struct dpaa2_switch_fq *fq;
+ int retries = 0;
+
+ fq = container_of(napi, struct dpaa2_switch_fq, napi);
+
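+	/* Pull frames from the FQ into the store and consume them until
+	 * either the budget is spent or no more frames are available
+	 */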
+ do {
+ err = dpaa2_switch_pull_fq(fq);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ dpaa2_switch_refill_bp(fq->ethsw);
+
+ store_cleaned = dpaa2_switch_store_consume(fq);
+ cleaned += store_cleaned;
+
+ if (cleaned >= budget) {
+ work_done = budget;
+ goto out;
+ }
+
+ } while (store_cleaned);
+
+ /* We didn't consume the entire budget, so finish napi and re-enable
+ * data availability notifications
+ */
+ napi_complete_done(napi, cleaned);
+ do {
+ err = dpaa2_io_service_rearm(NULL, &fq->nctx);
+ cpu_relax();
+ } while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
+
+ work_done = max(cleaned, 1);
+out:
+
+ return work_done;
+}
+
+static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_switch_fq *fq;
+
+ fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
+
+ napi_schedule(&fq->napi);
+}
+
+static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
+{
+ struct dpsw_ctrl_if_queue_cfg queue_cfg;
+ struct dpaa2_io_notification_ctx *nctx;
+ int err, i, j;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
+ nctx = &ethsw->fq[i].nctx;
+
+ /* Register a new software context for the FQID.
+ * By using NULL as the first parameter, we specify that we do
+		 * not care on which CPU interrupts are received for this queue
+ */
+ nctx->is_cdan = 0;
+ nctx->id = ethsw->fq[i].fqid;
+ nctx->desired_cpu = DPAA2_IO_ANY_CPU;
+ nctx->cb = dpaa2_switch_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
+ if (err) {
+ err = -EPROBE_DEFER;
+ goto err_register;
+ }
+
+ queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
+ DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
+ queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
+ queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
+ queue_cfg.dest_cfg.priority = 0;
+ queue_cfg.user_ctx = nctx->qman64;
+
+ err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ ethsw->fq[i].type,
+ &queue_cfg);
+ if (err)
+ goto err_set_queue;
+ }
+
+ return 0;
+
+err_set_queue:
+ dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
+err_register:
+ for (j = 0; j < i; j++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
+ ethsw->dev);
+
+ return err;
+}
+
+static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
+ ethsw->dev);
+}
+
+static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
+{
+ int err;
+
+ /* setup FQs for Rx and Tx Conf */
+ err = dpaa2_switch_setup_fqs(ethsw);
+ if (err)
+ return err;
+
+ /* setup the buffer pool needed on the Rx path */
+ err = dpaa2_switch_setup_dpbp(ethsw);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_seed_bp(ethsw);
+ if (err)
+ goto err_free_dpbp;
+
+ err = dpaa2_switch_alloc_rings(ethsw);
+ if (err)
+ goto err_drain_dpbp;
+
+ err = dpaa2_switch_setup_dpio(ethsw);
+ if (err)
+ goto err_destroy_rings;
+
+ err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
+ goto err_deregister_dpio;
+ }
+
+ return 0;
+
+err_deregister_dpio:
+ dpaa2_switch_free_dpio(ethsw);
+err_destroy_rings:
+ dpaa2_switch_destroy_rings(ethsw);
+err_drain_dpbp:
+ dpaa2_switch_drain_bp(ethsw);
+err_free_dpbp:
+ dpaa2_switch_free_dpbp(ethsw);
+
+ return err;
+}
+
+static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct dpsw_vlan_if_cfg vcfg = {0};
+ struct dpsw_tci_cfg tci_cfg = {0};
+ struct dpsw_stp_cfg stp_cfg;
+ int err;
+ u16 i;
+
+ ethsw->dev_id = sw_dev->obj_desc.id;
+
+ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
+ return err;
+ }
+
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &ethsw->major,
+ &ethsw->minor);
+ if (err) {
+ dev_err(dev, "dpsw_get_api_version err %d\n", err);
+ goto err_close;
+ }
+
+ /* Minimum supported DPSW version check */
+ if (ethsw->major < DPSW_MIN_VER_MAJOR ||
+ (ethsw->major == DPSW_MIN_VER_MAJOR &&
+ ethsw->minor < DPSW_MIN_VER_MINOR)) {
+ dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
+ ethsw->major, ethsw->minor);
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
+ err = -EOPNOTSUPP;
+ goto err_close;
+ }
+
+ dpaa2_switch_detect_features(ethsw);
+
+ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
+ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
+ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
+
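+	/* Bring each switch port into a known initial state: interface
+	 * disabled, STP set to forwarding on the default VLAN and the default
+	 * VLAN configuration removed
+	 */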
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
+ if (err) {
+ dev_err(dev, "dpsw_if_disable err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+ err, i);
+ goto err_close;
+ }
+
+ /* Switch starts with all ports configured to VLAN 1. Need to
+ * remove this setting to allow configuration at bridge join
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = i;
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ goto err_close;
+ }
+
+ tci_cfg.vlan_id = 4095;
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_tci err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
+ goto err_close;
+ }
+ }
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
+ if (err) {
+ dev_err(dev, "dpsw_vlan_remove err %d\n", err);
+ goto err_close;
+ }
+
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
+ err = -ENOMEM;
+ goto err_close;
+ }
+
+ err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ err = dpaa2_switch_ctrl_if_setup(ethsw);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ return 0;
+
+err_destroy_ordered_workqueue:
+ destroy_workqueue(ethsw->workqueue);
+
+err_close:
+ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ return err;
+}
+
+/* Add an ACL to redirect frames with specific destination MAC address to
+ * control interface
+ */
+static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
+ const char *mac)
+{
+ struct dpaa2_switch_acl_entry acl_entry = {0};
+
+ /* Match on the destination MAC address */
+ ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
+ eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
+
+ /* Trap to CPU */
+ acl_entry.cfg.precedence = 0;
+ acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
+
+ return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry);
+}
+
+static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
+{
+ const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid = DEFAULT_VLAN_ID,
+ .flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
+ };
+ struct net_device *netdev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+ struct dpsw_fdb_cfg fdb_cfg = {0};
+ struct dpsw_if_attr dpsw_if_attr;
+ struct dpaa2_switch_fdb *fdb;
+ struct dpsw_acl_cfg acl_cfg;
+ u16 fdb_id, acl_tbl_id;
+ int err;
+
+ /* Get the Tx queue for this specific port */
+ err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &dpsw_if_attr);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
+ return err;
+ }
+ port_priv->tx_qdid = dpsw_if_attr.qdid;
+
+ /* Create a FDB table for this particular switch port */
+ fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
+ err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &fdb_id, &fdb_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
+ return err;
+ }
+
+ /* Find an unused dpaa2_switch_fdb structure and use it */
+ fdb = dpaa2_switch_fdb_get_unused(ethsw);
+ fdb->fdb_id = fdb_id;
+ fdb->in_use = true;
+ fdb->bridge_dev = NULL;
+ port_priv->fdb = fdb;
+
+ /* We need to add VLAN 1 as the PVID on this port until it is under a
+ * bridge since the DPAA2 switch is not able to handle the traffic in a
+ * VLAN unaware fashion
+ */
+ err = dpaa2_switch_port_vlans_add(netdev, &vlan);
+ if (err)
+ return err;
+
+	/* Setup the egress flooding domains (broadcast, unknown unicast) */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
+ if (err)
+ return err;
+
+ /* Create an ACL table to be used by this switch port */
+ acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
+ err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &acl_tbl_id, &acl_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_acl_add err %d\n", err);
+ return err;
+ }
+
+ acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
+ acl_tbl->ethsw = ethsw;
+ acl_tbl->id = acl_tbl_id;
+ acl_tbl->in_use = true;
+ acl_tbl->num_rules = 0;
+ INIT_LIST_HEAD(&acl_tbl->entries);
+
+ err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err)
+ dev_warn(dev, "dpsw_close err %d\n", err);
+}
+
+static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
+{
+ dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ dpaa2_switch_free_dpio(ethsw);
+ dpaa2_switch_destroy_rings(ethsw);
+ dpaa2_switch_drain_bp(ethsw);
+ dpaa2_switch_free_dpbp(ethsw);
+}
+
+static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
+{
+ struct ethsw_port_priv *port_priv;
+ struct ethsw_core *ethsw;
+ struct device *dev;
+ int i;
+
+ dev = &sw_dev->dev;
+ ethsw = dev_get_drvdata(dev);
+
+ dpaa2_switch_ctrl_if_teardown(ethsw);
+
+ dpaa2_switch_teardown_irqs(sw_dev);
+
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ free_netdev(port_priv->netdev);
+ }
+
+ kfree(ethsw->fdbs);
+ kfree(ethsw->acls);
+ kfree(ethsw->ports);
+
+ dpaa2_switch_takedown(sw_dev);
+
+ destroy_workqueue(ethsw->workqueue);
+
+ fsl_mc_portal_free(ethsw->mc_io);
+
+ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
+ u16 port_idx)
+{
+ struct ethsw_port_priv *port_priv;
+ struct device *dev = ethsw->dev;
+ struct net_device *port_netdev;
+ int err;
+
+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
+ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
+
+ port_priv = netdev_priv(port_netdev);
+ port_priv->netdev = port_netdev;
+ port_priv->ethsw_data = ethsw;
+
+ port_priv->idx = port_idx;
+ port_priv->stp_state = BR_STATE_FORWARDING;
+
+ SET_NETDEV_DEV(port_netdev, dev);
+ port_netdev->netdev_ops = &dpaa2_switch_port_ops;
+ port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
+
+ port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
+
+ port_priv->bcast_flood = true;
+ port_priv->ucast_flood = true;
+
+ /* Set MTU limits */
+ port_netdev->min_mtu = ETH_MIN_MTU;
+ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
+ /* Populate the private port structure so that later calls to
+ * dpaa2_switch_port_init() can use it.
+ */
+ ethsw->ports[port_idx] = port_priv;
+
+ /* The DPAA2 switch's ingress path depends on the VLAN table,
+ * thus we are not able to disable VLAN filtering.
+ */
+ port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_FILTER |
+ NETIF_F_HW_TC;
+
+ err = dpaa2_switch_port_init(port_priv, port_idx);
+ if (err)
+ goto err_port_probe;
+
+ err = dpaa2_switch_port_set_mac_addr(port_priv);
+ if (err)
+ goto err_port_probe;
+
+ err = dpaa2_switch_port_set_learning(port_priv, false);
+ if (err)
+ goto err_port_probe;
+ port_priv->learn_ena = false;
+
+ return 0;
+
+err_port_probe:
+ free_netdev(port_netdev);
+ ethsw->ports[port_idx] = NULL;
+
+ return err;
+}
+
+static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw;
+ int i, err;
+
+	/* Allocate the switch core */
+ ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+
+ if (!ethsw)
+ return -ENOMEM;
+
+ ethsw->dev = dev;
+ ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
+ dev_set_drvdata(dev, ethsw);
+
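+	/* Allocate an MC portal usable from atomic context for sending
+	 * commands to the DPSW object
+	 */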
+ err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &ethsw->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_drvdata;
+ }
+
+ err = dpaa2_switch_init(sw_dev);
+ if (err)
+ goto err_free_cmdport;
+
+ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
+ GFP_KERNEL);
+ if (!(ethsw->ports)) {
+ err = -ENOMEM;
+ goto err_takedown;
+ }
+
+ ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
+ GFP_KERNEL);
+ if (!ethsw->fdbs) {
+ err = -ENOMEM;
+ goto err_free_ports;
+ }
+
+ ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls),
+ GFP_KERNEL);
+ if (!ethsw->acls) {
+ err = -ENOMEM;
+ goto err_free_fdbs;
+ }
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpaa2_switch_probe_port(ethsw, i);
+ if (err)
+ goto err_free_netdev;
+ }
+
+ /* Add a NAPI instance for each of the Rx queues. The first port's
+ * net_device will be associated with the instances since we do not have
+	 * different queues for each switch port.
+ */
+ for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
+ netif_napi_add(ethsw->ports[0]->netdev,
+ &ethsw->fq[i].napi, dpaa2_switch_poll,
+ NAPI_POLL_WEIGHT);
+
+ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
+ goto err_free_netdev;
+ }
+
+ /* Setup IRQs */
+ err = dpaa2_switch_setup_irqs(sw_dev);
+ if (err)
+ goto err_stop;
+
+ /* Register the netdev only when the entire setup is done and the
+ * switch port interfaces are ready to receive traffic
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = register_netdev(ethsw->ports[i]->netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
+ goto err_unregister_ports;
+ }
+ }
+
+ return 0;
+
+err_unregister_ports:
+ for (i--; i >= 0; i--)
+ unregister_netdev(ethsw->ports[i]->netdev);
+ dpaa2_switch_teardown_irqs(sw_dev);
+err_stop:
+ dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+err_free_netdev:
+ for (i--; i >= 0; i--)
+ free_netdev(ethsw->ports[i]->netdev);
+ kfree(ethsw->acls);
+err_free_fdbs:
+ kfree(ethsw->fdbs);
+err_free_ports:
+ kfree(ethsw->ports);
+
+err_takedown:
+ dpaa2_switch_takedown(sw_dev);
+
+err_free_cmdport:
+ fsl_mc_portal_free(ethsw->mc_io);
+
+err_free_drvdata:
+ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
+
+ return err;
+}
+
+static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
+
+static struct fsl_mc_driver dpaa2_switch_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_switch_probe,
+ .remove = dpaa2_switch_remove,
+ .match_id_table = dpaa2_switch_match_id_table
+};
+
+static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
+ .notifier_call = dpaa2_switch_port_netdevice_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_nb = {
+ .notifier_call = dpaa2_switch_port_event,
+};
+
+static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
+ .notifier_call = dpaa2_switch_port_blocking_event,
+};
+
+static int dpaa2_switch_register_notifiers(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
+ return err;
+ }
+
+ err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
+ goto err_switchdev_nb;
+ }
+
+ err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err) {
+ pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
+ goto err_switchdev_blocking_nb;
+ }
+
+ return 0;
+
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+err_switchdev_nb:
+ unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+
+ return err;
+}
+
+static void dpaa2_switch_unregister_notifiers(void)
+{
+ int err;
+
+ err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
+ err);
+
+ err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
+
+ err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
+ if (err)
+ pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
+}
+
+static int __init dpaa2_switch_driver_init(void)
+{
+ int err;
+
+ err = fsl_mc_driver_register(&dpaa2_switch_drv);
+ if (err)
+ return err;
+
+ err = dpaa2_switch_register_notifiers();
+ if (err) {
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit dpaa2_switch_driver_exit(void)
+{
+ dpaa2_switch_unregister_notifiers();
+ fsl_mc_driver_unregister(&dpaa2_switch_drv);
+}
+
+module_init(dpaa2_switch_driver_init);
+module_exit(dpaa2_switch_driver_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
new file mode 100644
index 000000000000..bdef71f234cb
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DPAA2 Ethernet Switch declarations
+ *
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __ETHSW_H
+#define __ETHSW_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <uapi/linux/if_bridge.h>
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+#include <linux/fsl/mc.h>
+#include <net/pkt_cls.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "dpsw.h"
+
+/* Number of IRQs supported */
+#define DPSW_IRQ_NUM 2
+
+/* Port is member of VLAN */
+#define ETHSW_VLAN_MEMBER 1
+/* VLAN to be treated as untagged on egress */
+#define ETHSW_VLAN_UNTAGGED 2
+/* Untagged frames will be assigned to this VLAN */
+#define ETHSW_VLAN_PVID 4
+/* VLAN configured on the switch */
+#define ETHSW_VLAN_GLOBAL 8
+
+/* Maximum Frame Length supported by HW (currently 10k) */
+#define DPAA2_MFL (10 * 1024)
+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
+
+#define ETHSW_FEATURE_MAC_ADDR BIT(0)
+
+/* Number of receive queues (one RX and one TX_CONF) */
+#define DPAA2_SWITCH_RX_NUM_FQS 2
+
+/* Hardware requires alignment for ingress/egress buffer addresses */
+#define DPAA2_SWITCH_RX_BUF_RAW_SIZE PAGE_SIZE
+#define DPAA2_SWITCH_RX_BUF_TAILROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_SWITCH_RX_BUF_SIZE \
+ (DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
+
+#define DPAA2_SWITCH_STORE_SIZE 16
+
+/* Buffer management */
+#define BUFS_PER_CMD 7
+#define DPAA2_ETHSW_NUM_BUFS (1024 * BUFS_PER_CMD)
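+/* Threshold (roughly 5/6 of all buffers) below which the buffer pool is
+ * considered in need of a refill
+ */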
+#define DPAA2_ETHSW_REFILL_THRESH (DPAA2_ETHSW_NUM_BUFS * 5 / 6)
+
+/* Number of times to retry DPIO portal operations while waiting
+ * for portal to finish executing current command and become
+ * available. We want to avoid being stuck in a while loop in case
+ * hardware becomes unresponsive, but not give up too easily if
+ * the portal really is busy for valid reasons
+ */
+#define DPAA2_SWITCH_SWP_BUSY_RETRIES 1000
+
+/* Hardware annotation buffer size */
+#define DPAA2_SWITCH_HWA_SIZE 64
+/* Software annotation buffer size */
+#define DPAA2_SWITCH_SWA_SIZE 64
+
+#define DPAA2_SWITCH_TX_BUF_ALIGN 64
+
+#define DPAA2_SWITCH_TX_DATA_OFFSET \
+ (DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)
+
+#define DPAA2_SWITCH_NEEDED_HEADROOM \
+ (DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
+
+#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES 16
+#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS 1
+
+#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE 256
+
+extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
+
+struct ethsw_core;
+
+struct dpaa2_switch_fq {
+ struct ethsw_core *ethsw;
+ enum dpsw_queue_type type;
+ struct dpaa2_io_store *store;
+ struct dpaa2_io_notification_ctx nctx;
+ struct napi_struct napi;
+ u32 fqid;
+};
+
+struct dpaa2_switch_fdb {
+ struct net_device *bridge_dev;
+ u16 fdb_id;
+ bool in_use;
+};
+
+struct dpaa2_switch_acl_entry {
+ struct list_head list;
+ u16 prio;
+ unsigned long cookie;
+
+ struct dpsw_acl_entry_cfg cfg;
+ struct dpsw_acl_key key;
+};
+
+struct dpaa2_switch_acl_tbl {
+ struct list_head entries;
+ struct ethsw_core *ethsw;
+ u64 ports;
+
+ u16 id;
+ u8 num_rules;
+ bool in_use;
+};
+
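+/* An ACL table is considered full when the user-added rules plus the default
+ * trap entries installed by the driver reach the per-port maximum.
+ */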
+static inline bool
+dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
+{
+ if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
+ DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
+ return true;
+ return false;
+}
+
+/* Per port private data */
+struct ethsw_port_priv {
+ struct net_device *netdev;
+ u16 idx;
+ struct ethsw_core *ethsw_data;
+ u8 link_state;
+ u8 stp_state;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+ u16 pvid;
+ u16 tx_qdid;
+
+ struct dpaa2_switch_fdb *fdb;
+ bool bcast_flood;
+ bool ucast_flood;
+ bool learn_ena;
+
+ struct dpaa2_switch_acl_tbl *acl_tbl;
+};
+
+/* Switch data */
+struct ethsw_core {
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ u16 dpsw_handle;
+ struct dpsw_attr sw_attr;
+ u16 major, minor;
+ unsigned long features;
+ int dev_id;
+ struct ethsw_port_priv **ports;
+ struct iommu_domain *iommu_domain;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+
+ struct workqueue_struct *workqueue;
+
+ struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS];
+ struct fsl_mc_device *dpbp_dev;
+ int buf_count;
+ u16 bpid;
+ int napi_users;
+
+ struct dpaa2_switch_fdb *fdbs;
+ struct dpaa2_switch_acl_tbl *acls;
+};
+
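+/* Map a switch port net_device to its interface index, or return -EINVAL if
+ * the netdev does not belong to this switch instance.
+ */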
+static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
+ struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (ethsw->ports[i]->netdev == netdev)
+ return ethsw->ports[i]->idx;
+
+ return -EINVAL;
+}
+
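+/* The driver requires the DPSW control interface, per-FDB flooding and
+ * broadcast domains and at least as many FDBs as ports; probing is refused
+ * otherwise.
+ */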
+static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
+{
+ if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
+ dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
+ dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
+ dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
+ return false;
+ }
+
+ if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
+ dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
+ return false;
+ }
+
+ return true;
+}
+
+bool dpaa2_switch_port_dev_check(const struct net_device *netdev);
+
+int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan);
+
+typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
+ struct fdb_dump_entry *fdb_entry,
+ void *data);
+
+/* TC offload */
+
+int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct flow_cls_offload *cls);
+
+int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct tc_cls_matchall_offload *cls);
+
+int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
+ struct dpaa2_switch_acl_entry *entry);
+#endif /* __ETHSW_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
index 6de613b13e4d..6f596a5fbeeb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpkg.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
@@ -13,11 +13,12 @@
/** Key Generator properties */
/**
- * Number of masks per key extraction
+ * DPKG_NUM_OF_MASKS - Number of masks per key extraction
*/
#define DPKG_NUM_OF_MASKS 4
+
/**
- * Number of extractions per key profile
+ * DPKG_MAX_NUM_OF_EXTRACTS - Number of extractions per key profile
*/
#define DPKG_MAX_NUM_OF_EXTRACTS 10
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 135f143097a5..8f7ceb731282 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -83,39 +83,21 @@ int dpmac_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpmac_attr *attr);
-/**
- * DPMAC link configuration/state options
- */
+/* DPMAC link configuration/state options */
-/**
- * Enable auto-negotiation
- */
#define DPMAC_LINK_OPT_AUTONEG BIT_ULL(0)
-/**
- * Enable half-duplex mode
- */
#define DPMAC_LINK_OPT_HALF_DUPLEX BIT_ULL(1)
-/**
- * Enable pause frames
- */
#define DPMAC_LINK_OPT_PAUSE BIT_ULL(2)
-/**
- * Enable a-symmetric pause frames
- */
#define DPMAC_LINK_OPT_ASYM_PAUSE BIT_ULL(3)
-/**
- * Advertised link speeds
- */
+/* Advertised link speeds */
#define DPMAC_ADVERTISED_10BASET_FULL BIT_ULL(0)
#define DPMAC_ADVERTISED_100BASET_FULL BIT_ULL(1)
#define DPMAC_ADVERTISED_1000BASET_FULL BIT_ULL(2)
#define DPMAC_ADVERTISED_10000BASET_FULL BIT_ULL(4)
#define DPMAC_ADVERTISED_2500BASEX_FULL BIT_ULL(5)
-/**
- * Advertise auto-negotiation enable
- */
+/* Advertise auto-negotiation enable */
#define DPMAC_ADVERTISED_AUTONEG BIT_ULL(3)
/**
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index aa429c17c343..d6afada99fb6 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -17,6 +17,8 @@
* This function has to be called before the following functions:
* - dpni_set_rx_tc_dist()
* - dpni_set_qos_table()
+ *
+ * Return: '0' on Success; Error code otherwise.
*/
int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
{
@@ -1793,6 +1795,8 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
* If cfg.enable is set to 0 the command will clear flow steering table.
* The packets will be classified according to settings made in
* dpni_set_rx_hash_dist()
+ *
+ * Return: '0' on Success; Error code otherwise.
*/
int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -1826,6 +1830,8 @@ int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
* If cfg.enable is set to 1 the packets will be classified using a hash
* function based on the key received in cfg.key_cfg_iova parameter.
* If cfg.enable is set to 0 the packets will be sent to the default queue
+ *
+ * Return: '0' on Success; Error code otherwise.
*/
int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
u32 cmd_flags,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 4e96d9362dd2..7de0562bbf59 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -10,73 +10,76 @@
struct fsl_mc_io;
-/**
- * Data Path Network Interface API
+/* Data Path Network Interface API
* Contains initialization APIs and runtime control APIs for DPNI
*/
/** General DPNI macros */
/**
- * Maximum number of traffic classes
+ * DPNI_MAX_TC - Maximum number of traffic classes
*/
#define DPNI_MAX_TC 8
/**
- * Maximum number of buffer pools per DPNI
+ * DPNI_MAX_DPBP - Maximum number of buffer pools per DPNI
*/
#define DPNI_MAX_DPBP 8
/**
- * All traffic classes considered; see dpni_set_queue()
+ * DPNI_ALL_TCS - All traffic classes considered; see dpni_set_queue()
*/
#define DPNI_ALL_TCS (u8)(-1)
/**
- * All flows within traffic class considered; see dpni_set_queue()
+ * DPNI_ALL_TC_FLOWS - All flows within traffic class considered; see
+ * dpni_set_queue()
*/
#define DPNI_ALL_TC_FLOWS (u16)(-1)
/**
- * Generate new flow ID; see dpni_set_queue()
+ * DPNI_NEW_FLOW_ID - Generate new flow ID; see dpni_set_queue()
*/
#define DPNI_NEW_FLOW_ID (u16)(-1)
/**
- * Tx traffic is always released to a buffer pool on transmit, there are no
- * resources allocated to have the frames confirmed back to the source after
- * transmission.
+ * DPNI_OPT_TX_FRM_RELEASE - Tx traffic is always released to a buffer pool on
+ * transmit; there are no resources allocated to have the frames confirmed back
+ * to the source after transmission.
*/
#define DPNI_OPT_TX_FRM_RELEASE 0x000001
/**
- * Disables support for MAC address filtering for addresses other than primary
- * MAC address. This affects both unicast and multicast. Promiscuous mode can
- * still be enabled/disabled for both unicast and multicast. If promiscuous mode
- * is disabled, only traffic matching the primary MAC address will be accepted.
+ * DPNI_OPT_NO_MAC_FILTER - Disables support for MAC address filtering for
+ * addresses other than primary MAC address. This affects both unicast and
+ * multicast. Promiscuous mode can still be enabled/disabled for both unicast
+ * and multicast. If promiscuous mode is disabled, only traffic matching the
+ * primary MAC address will be accepted.
*/
#define DPNI_OPT_NO_MAC_FILTER 0x000002
/**
- * Allocate policers for this DPNI. They can be used to rate-limit traffic per
- * traffic class (TC) basis.
+ * DPNI_OPT_HAS_POLICING - Allocate policers for this DPNI. They can be used to
+ * rate-limit traffic on a per traffic class (TC) basis.
*/
#define DPNI_OPT_HAS_POLICING 0x000004
/**
- * Congestion can be managed in several ways, allowing the buffer pool to
- * deplete on ingress, taildrop on each queue or use congestion groups for sets
- * of queues. If set, it configures a single congestion groups across all TCs.
- * If reset, a congestion group is allocated for each TC. Only relevant if the
- * DPNI has multiple traffic classes.
+ * DPNI_OPT_SHARED_CONGESTION - Congestion can be managed in several ways,
+ * allowing the buffer pool to deplete on ingress, taildrop on each queue or
+ * use congestion groups for sets of queues. If set, it configures a single
+ * congestion group across all TCs. If reset, a congestion group is allocated
+ * for each TC. Only relevant if the DPNI has multiple traffic classes.
*/
#define DPNI_OPT_SHARED_CONGESTION 0x000008
/**
- * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
- * look-ups are exact match. Note that TCAM is not available on LS1088 and its
- * variants. Setting this bit on these SoCs will trigger an error.
+ * DPNI_OPT_HAS_KEY_MASKING - Enables TCAM for Flow Steering and QoS look-ups.
+ * If not specified, all look-ups are exact match. Note that TCAM is not
+ * available on LS1088 and its variants. Setting this bit on these SoCs will
+ * trigger an error.
*/
#define DPNI_OPT_HAS_KEY_MASKING 0x000010
/**
- * Disables the flow steering table.
+ * DPNI_OPT_NO_FS - Disables the flow steering table.
*/
#define DPNI_OPT_NO_FS 0x000020
/**
- * Flow steering table is shared between all traffic classes
+ * DPNI_OPT_SHARED_FS - Flow steering table is shared between all traffic
+ * classes
*/
#define DPNI_OPT_SHARED_FS 0x001000
@@ -129,20 +132,14 @@ int dpni_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-/**
- * DPNI IRQ Index and Events
- */
+/* DPNI IRQ Index and Events */
-/**
- * IRQ index
- */
#define DPNI_IRQ_INDEX 0
-/**
- * IRQ events:
- * indicates a change in link state
- * indicates a change in endpoint
- */
+
+/* DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state */
#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
+/* DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint */
#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED 0x00000002
int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
@@ -222,32 +219,30 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_attr *attr);
-/**
- * DPNI errors
- */
+/* DPNI errors */
/**
- * Extract out of frame header error
+ * DPNI_ERROR_EOFHE - Extract out of frame header error
*/
#define DPNI_ERROR_EOFHE 0x00020000
/**
- * Frame length error
+ * DPNI_ERROR_FLE - Frame length error
*/
#define DPNI_ERROR_FLE 0x00002000
/**
- * Frame physical error
+ * DPNI_ERROR_FPE - Frame physical error
*/
#define DPNI_ERROR_FPE 0x00001000
/**
- * Parsing header error
+ * DPNI_ERROR_PHE - Parsing header error
*/
#define DPNI_ERROR_PHE 0x00000020
/**
- * Parser L3 checksum error
+ * DPNI_ERROR_L3CE - Parser L3 checksum error
*/
#define DPNI_ERROR_L3CE 0x00000004
/**
- * Parser L3 checksum error
+ * DPNI_ERROR_L4CE - Parser L4 checksum error
*/
#define DPNI_ERROR_L4CE 0x00000001
@@ -281,36 +276,35 @@ int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_error_cfg *cfg);
-/**
- * DPNI buffer layout modification options
- */
+/* DPNI buffer layout modification options */
/**
- * Select to modify the time-stamp setting
+ * DPNI_BUF_LAYOUT_OPT_TIMESTAMP - Select to modify the time-stamp setting
*/
#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
/**
- * Select to modify the parser-result setting; not applicable for Tx
+ * DPNI_BUF_LAYOUT_OPT_PARSER_RESULT - Select to modify the parser-result
+ * setting; not applicable for Tx
*/
#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
/**
- * Select to modify the frame-status setting
+ * DPNI_BUF_LAYOUT_OPT_FRAME_STATUS - Select to modify the frame-status setting
*/
#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
/**
- * Select to modify the private-data-size setting
+ * DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE - Select to modify the private-data-size setting
*/
#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
/**
- * Select to modify the data-alignment setting
+ * DPNI_BUF_LAYOUT_OPT_DATA_ALIGN - Select to modify the data-alignment setting
*/
#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
/**
- * Select to modify the data-head-room setting
+ * DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM - Select to modify the data-head-room setting
*/
#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
/**
- * Select to modify the data-tail-room setting
+ * DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM - Select to modify the data-tail-room setting
*/
#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
@@ -343,7 +337,8 @@ struct dpni_buffer_layout {
* @DPNI_QUEUE_TX: Tx queue
* @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
* @DPNI_QUEUE_RX_ERR: Rx error queue
- */enum dpni_queue_type {
+ */
+enum dpni_queue_type {
DPNI_QUEUE_RX,
DPNI_QUEUE_TX,
DPNI_QUEUE_TX_CONFIRM,
@@ -424,7 +419,7 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
* lack of buffers
* @page_2.egress_discarded_frames: Egress discarded frame count
* @page_2.egress_confirmed_frames: Egress confirmed frame count
- * @page3: Page_3 statistics structure
+ * @page_3: Page_3 statistics structure
* @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes
* dequeued from egress FQs
* @page_3.egress_dequeue_frames: Cumulative count of the number of frames
@@ -501,30 +496,14 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io,
u8 page,
union dpni_statistics *stat);
-/**
- * Enable auto-negotiation
- */
#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
-/**
- * Enable half-duplex mode
- */
#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
-/**
- * Enable pause frames
- */
#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
-/**
- * Enable a-symmetric pause frames
- */
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
-
-/**
- * Enable priority flow control pause frames
- */
#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
/**
- * struct - Structure representing DPNI link configuration
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
* @rate: Rate
* @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
*/
@@ -687,8 +666,8 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
const struct dpni_rx_tc_dist_cfg *cfg);
/**
- * When used for fs_miss_flow_id in function dpni_set_rx_dist,
- * will signal to dpni to drop all unclassified frames
+ * DPNI_FS_MISS_DROP - When used for fs_miss_flow_id in function
+ * dpni_set_rx_dist, will signal to dpni to drop all unclassified frames
*/
#define DPNI_FS_MISS_DROP ((uint16_t)-1)
@@ -766,7 +745,7 @@ enum dpni_dest {
/**
* struct dpni_queue - Queue structure
- * @destination - Destination structure
+ * @destination: Destination structure
* @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
* Identifies either a DPIO or a DPCON object.
* Not relevant for Tx queues.
@@ -837,9 +816,7 @@ struct dpni_queue_id {
u16 qdbin;
};
-/**
- * Set User Context
- */
+/* Set User Context */
#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
#define DPNI_QUEUE_OPT_DEST 0x00000002
#define DPNI_QUEUE_OPT_FLC 0x00000004
@@ -904,9 +881,9 @@ struct dpni_dest_cfg {
/* DPNI congestion options */
/**
- * This congestion will trigger flow control or priority flow control.
- * This will have effect only if flow control is enabled with
- * dpni_set_link_cfg().
+ * DPNI_CONG_OPT_FLOW_CONTROL - This congestion will trigger flow control or
+ * priority flow control. This will have effect only if flow control is
+ * enabled with dpni_set_link_cfg().
*/
#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
@@ -990,23 +967,24 @@ struct dpni_rule_cfg {
};
/**
- * Discard matching traffic. If set, this takes precedence over any other
- * configuration and matching traffic is always discarded.
+ * DPNI_FS_OPT_DISCARD - Discard matching traffic. If set, this takes
+ * precedence over any other configuration and matching traffic is always
+ * discarded.
*/
#define DPNI_FS_OPT_DISCARD 0x1
/**
- * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
- * override the FLC value set per queue.
+ * DPNI_FS_OPT_SET_FLC - Set FLC value. If set, flc member of struct
+ * dpni_fs_action_cfg is used to override the FLC value set per queue.
* For more details check the Frame Descriptor section in the hardware
* documentation.
*/
#define DPNI_FS_OPT_SET_FLC 0x2
/**
- * Indicates whether the 6 lowest significant bits of FLC are used for stash
- * control. If set, the 6 least significant bits in value are interpreted as
- * follows:
+ * DPNI_FS_OPT_SET_STASH_CONTROL - Indicates whether the 6 least significant
+ * bits of FLC are used for stash control. If set, the 6 least significant bits
+ * in value are interpreted as follows:
* - bits 0-1: indicates the number of 64 byte units of context that are
* stashed. FLC value is interpreted as a memory address in this case,
* excluding the 6 LS bits.
@@ -1068,7 +1046,7 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
u16 *major_ver,
u16 *minor_ver);
/**
- * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
* @rate_limit: Rate in Mbps
* @max_burst_size: Burst size in bytes (up to 64KB)
*/
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
index 05c413719e55..01d77c685a5b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dprtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -13,9 +13,6 @@
struct fsl_mc_io;
-/**
- * Number of irq's
- */
#define DPRTC_MAX_IRQ_NUM 1
#define DPRTC_IRQ_INDEX 0
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
new file mode 100644
index 000000000000..cb13e740f72b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h
@@ -0,0 +1,537 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __FSL_DPSW_CMD_H
+#define __FSL_DPSW_CMD_H
+
+#include "dpsw.h"
+
+/* DPSW Version */
+#define DPSW_VER_MAJOR 8
+#define DPSW_VER_MINOR 9
+
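+/* A command ID encodes the command number in the upper bits and a 4-bit
+ * command version in the low nibble; DPSW_CMD_V2() selects version 2 of a
+ * command.
+ */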
+#define DPSW_CMD_BASE_VERSION 1
+#define DPSW_CMD_VERSION_2 2
+#define DPSW_CMD_ID_OFFSET 4
+
+#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
+#define DPSW_CMD_V2(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_VERSION_2)
+
+/* Command IDs */
+#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
+#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
+
+#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
+
+#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
+#define DPSW_CMDID_GET_ATTR DPSW_CMD_V2(0x004)
+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
+
+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
+
+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
+
+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+
+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
+
+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034)
+
+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+
+#define DPSW_CMDID_IF_GET_ATTR DPSW_CMD_ID(0x042)
+
+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
+
+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
+
+#define DPSW_CMDID_IF_GET_TCI DPSW_CMD_ID(0x04A)
+
+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
+
+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_V2(0x061)
+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
+
+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
+
+#define DPSW_CMDID_FDB_ADD DPSW_CMD_ID(0x082)
+#define DPSW_CMDID_FDB_REMOVE DPSW_CMD_ID(0x083)
+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
+#define DPSW_CMDID_FDB_DUMP DPSW_CMD_ID(0x08A)
+
+#define DPSW_CMDID_ACL_ADD DPSW_CMD_ID(0x090)
+#define DPSW_CMDID_ACL_REMOVE DPSW_CMD_ID(0x091)
+#define DPSW_CMDID_ACL_ADD_ENTRY DPSW_CMD_ID(0x092)
+#define DPSW_CMDID_ACL_REMOVE_ENTRY DPSW_CMD_ID(0x093)
+#define DPSW_CMDID_ACL_ADD_IF DPSW_CMD_ID(0x094)
+#define DPSW_CMDID_ACL_REMOVE_IF DPSW_CMD_ID(0x095)
+
+#define DPSW_CMDID_IF_GET_PORT_MAC_ADDR DPSW_CMD_ID(0x0A7)
+
+#define DPSW_CMDID_CTRL_IF_GET_ATTR DPSW_CMD_ID(0x0A0)
+#define DPSW_CMDID_CTRL_IF_SET_POOLS DPSW_CMD_ID(0x0A1)
+#define DPSW_CMDID_CTRL_IF_ENABLE DPSW_CMD_ID(0x0A2)
+#define DPSW_CMDID_CTRL_IF_DISABLE DPSW_CMD_ID(0x0A3)
+#define DPSW_CMDID_CTRL_IF_SET_QUEUE DPSW_CMD_ID(0x0A6)
+
+#define DPSW_CMDID_SET_EGRESS_FLOOD DPSW_CMD_ID(0x0AC)
+#define DPSW_CMDID_IF_SET_LEARNING_MODE DPSW_CMD_ID(0x0AD)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSW_MASK(field) \
+ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
+ DPSW_##field##_SHIFT)
+#define dpsw_set_field(var, field, val) \
+ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
+#define dpsw_get_field(var, field) \
+ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
+#define dpsw_get_bit(var, bit) \
+ (((var) >> (bit)) & GENMASK(0, 0))
+
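+/* The structures below are laid directly over the raw MC command/response
+ * parameter words, so no compiler padding may be inserted.
+ */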
+#pragma pack(push, 1)
+struct dpsw_cmd_open {
+ __le32 dpsw_id;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+struct dpsw_cmd_create {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 max_meters_per_if;
+ /* from LSB: only the first 4 bits */
+ u8 component_type;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le16 max_vlans;
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le16 max_fdb_mc_groups;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_cmd_destroy {
+ __le32 dpsw_id;
+};
+
+#define DPSW_ENABLE_SHIFT 0
+#define DPSW_ENABLE_SIZE 1
+
+struct dpsw_rsp_is_enabled {
+ /* from LSB: enable:1 */
+ u8 enabled;
+};
+
+struct dpsw_cmd_set_irq_enable {
+ u8 enable_state;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_enable {
+ u8 enable_state;
+};
+
+struct dpsw_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpsw_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpsw_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+#define DPSW_FLOODING_CFG_SHIFT 0
+#define DPSW_FLOODING_CFG_SIZE 4
+
+#define DPSW_BROADCAST_CFG_SHIFT 4
+#define DPSW_BROADCAST_CFG_SIZE 4
+
+struct dpsw_rsp_get_attr {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 num_fdbs;
+ __le16 max_vlans;
+ __le16 num_vlans;
+ /* cmd word 1 */
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le32 dpsw_id;
+ /* cmd word 2 */
+ __le16 mem_size;
+ __le16 max_fdb_mc_groups;
+ u8 max_meters_per_if;
+ /* from LSB only the first 4 bits */
+ u8 component_type;
+ /* [0:3] - flooding configuration
+ * [4:7] - broadcast configuration
+ */
+ u8 repl_cfg;
+ u8 pad;
+ /* cmd word 3 */
+ __le64 options;
+};
+
+#define DPSW_VLAN_ID_SHIFT 0
+#define DPSW_VLAN_ID_SIZE 12
+#define DPSW_DEI_SHIFT 12
+#define DPSW_DEI_SIZE 1
+#define DPSW_PCP_SHIFT 13
+#define DPSW_PCP_SIZE 3
+
+struct dpsw_cmd_if_set_tci {
+ __le16 if_id;
+ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
+ __le16 conf;
+};
+
+struct dpsw_cmd_if_get_tci {
+ __le16 if_id;
+};
+
+struct dpsw_rsp_if_get_tci {
+ __le16 pad;
+ __le16 vlan_id;
+ u8 dei;
+ u8 pcp;
+};
+
+#define DPSW_STATE_SHIFT 0
+#define DPSW_STATE_SIZE 4
+
+struct dpsw_cmd_if_set_stp {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only the first LSB 4 bits */
+ u8 state;
+};
+
+#define DPSW_COUNTER_TYPE_SHIFT 0
+#define DPSW_COUNTER_TYPE_SIZE 5
+
+struct dpsw_cmd_if_get_counter {
+ __le16 if_id;
+ /* from LSB: type:5 */
+ u8 type;
+};
+
+struct dpsw_rsp_if_get_counter {
+ __le64 pad;
+ __le64 counter;
+};
+
+struct dpsw_cmd_if {
+ __le16 if_id;
+};
+
+#define DPSW_ADMIT_UNTAGGED_SHIFT 0
+#define DPSW_ADMIT_UNTAGGED_SIZE 4
+#define DPSW_ENABLED_SHIFT 5
+#define DPSW_ENABLED_SIZE 1
+#define DPSW_ACCEPT_ALL_VLAN_SHIFT 6
+#define DPSW_ACCEPT_ALL_VLAN_SIZE 1
+
+struct dpsw_rsp_if_get_attr {
+ /* cmd word 0 */
+ /* from LSB: admit_untagged:4 enabled:1 accept_all_vlan:1 */
+ u8 conf;
+ u8 pad1;
+ u8 num_tcs;
+ u8 pad2;
+ __le16 qdid;
+ /* cmd word 1 */
+ __le32 options;
+ __le32 pad3;
+ /* cmd word 2 */
+ __le32 rate;
+};
+
+struct dpsw_cmd_if_set_max_frame_length {
+ __le16 if_id;
+ __le16 frame_length;
+};
+
+struct dpsw_cmd_if_set_link_cfg {
+ /* cmd word 0 */
+ __le16 if_id;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad1;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_cmd_if_get_link_state {
+ __le16 if_id;
+};
+
+#define DPSW_UP_SHIFT 0
+#define DPSW_UP_SIZE 1
+
+struct dpsw_rsp_if_get_link_state {
+ /* cmd word 0 */
+ __le32 pad0;
+ u8 up;
+ u8 pad1[3];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad2;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_vlan_add {
+ __le16 fdb_id;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_vlan_add_if {
+ /* cmd word 0 */
+ __le16 options;
+ __le16 vlan_id;
+ __le16 fdb_id;
+ __le16 pad0;
+ /* cmd word 1-4 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_vlan_manage_if {
+ /* cmd word 0 */
+ __le16 pad0;
+ __le16 vlan_id;
+ __le32 pad1;
+ /* cmd word 1-4 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_vlan_remove {
+ __le16 pad;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_fdb_add {
+ __le32 pad;
+ __le16 fdb_ageing_time;
+ __le16 num_fdb_entries;
+};
+
+struct dpsw_rsp_fdb_add {
+ __le16 fdb_id;
+};
+
+struct dpsw_cmd_fdb_remove {
+ __le16 fdb_id;
+};
+
+#define DPSW_ENTRY_TYPE_SHIFT 0
+#define DPSW_ENTRY_TYPE_SIZE 4
+
+struct dpsw_cmd_fdb_unicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ u8 mac_addr[6];
+ /* cmd word 1 */
+ __le16 if_egress;
+ /* only the first 4 bits from LSB */
+ u8 type;
+};
+
+struct dpsw_cmd_fdb_multicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ __le16 num_ifs;
+ /* only the first 4 bits from LSB */
+ u8 type;
+ u8 pad[3];
+ /* cmd word 1 */
+ u8 mac_addr[6];
+ __le16 pad2;
+ /* cmd word 2-5 */
+ __le64 if_id;
+};
+
+struct dpsw_cmd_fdb_dump {
+ __le16 fdb_id;
+ __le16 pad0;
+ __le32 pad1;
+ __le64 iova_addr;
+ __le32 iova_size;
+};
+
+struct dpsw_rsp_fdb_dump {
+ __le16 num_entries;
+};
+
+struct dpsw_rsp_ctrl_if_get_attr {
+ __le64 pad;
+ __le32 rx_fqid;
+ __le32 rx_err_fqid;
+ __le32 tx_err_conf_fqid;
+};
+
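+/* Place the boolean 'val' at bit position 'order' of the backup pool mask */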
+#define DPSW_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+struct dpsw_cmd_ctrl_if_set_pools {
+ u8 num_dpbp;
+ u8 backup_pool_mask;
+ __le16 pad;
+ __le32 dpbp_id[DPSW_MAX_DPBP];
+ __le16 buffer_size[DPSW_MAX_DPBP];
+};
+
+#define DPSW_DEST_TYPE_SHIFT 0
+#define DPSW_DEST_TYPE_SIZE 4
+
+struct dpsw_cmd_ctrl_if_set_queue {
+ __le32 dest_id;
+ u8 dest_priority;
+ u8 pad;
+ /* from LSB: dest_type:4 */
+ u8 dest_type;
+ u8 qtype;
+ __le64 user_ctx;
+ __le32 options;
+};
+
+struct dpsw_rsp_get_api_version {
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+struct dpsw_rsp_if_get_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpsw_cmd_set_egress_flood {
+ __le16 fdb_id;
+ u8 flood_type;
+ u8 pad[5];
+ __le64 if_id;
+};
+
+#define DPSW_LEARNING_MODE_SHIFT 0
+#define DPSW_LEARNING_MODE_SIZE 4
+
+struct dpsw_cmd_if_set_learning_mode {
+ __le16 if_id;
+ /* only the first 4 bits from LSB */
+ u8 mode;
+};
+
+struct dpsw_cmd_acl_add {
+ __le16 pad;
+ __le16 max_entries;
+};
+
+struct dpsw_rsp_acl_add {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_remove {
+ __le16 acl_id;
+};
+
+struct dpsw_cmd_acl_if {
+ __le16 acl_id;
+ __le16 num_ifs;
+ __le32 pad;
+ __le64 if_id;
+};
+
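+/* Layout of the ACL key/mask data prepared by the driver in a DMA-able
+ * buffer and referenced through key_iova in struct dpsw_cmd_acl_entry.
+ */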
+struct dpsw_prep_acl_entry {
+ u8 match_l2_dest_mac[6];
+ __le16 match_l2_tpid;
+
+ u8 match_l2_source_mac[6];
+ __le16 match_l2_vlan_id;
+
+ __le32 match_l3_dest_ip;
+ __le32 match_l3_source_ip;
+
+ __le16 match_l4_dest_port;
+ __le16 match_l4_source_port;
+ __le16 match_l2_ether_type;
+ u8 match_l2_pcp_dei;
+ u8 match_l3_dscp;
+
+ u8 mask_l2_dest_mac[6];
+ __le16 mask_l2_tpid;
+
+ u8 mask_l2_source_mac[6];
+ __le16 mask_l2_vlan_id;
+
+ __le32 mask_l3_dest_ip;
+ __le32 mask_l3_source_ip;
+
+ __le16 mask_l4_dest_port;
+ __le16 mask_l4_source_port;
+ __le16 mask_l2_ether_type;
+ u8 mask_l2_pcp_dei;
+ u8 mask_l3_dscp;
+
+ u8 match_l3_protocol;
+ u8 mask_l3_protocol;
+};
+
+#define DPSW_RESULT_ACTION_SHIFT 0
+#define DPSW_RESULT_ACTION_SIZE 4
+
+struct dpsw_cmd_acl_entry {
+ __le16 acl_id;
+ __le16 result_if_id;
+ __le32 precedence;
+ /* from LSB only the first 4 bits */
+ u8 result_action;
+ u8 pad[7];
+ __le64 pad2[4];
+ __le64 key_iova;
+};
+#pragma pack(pop)
+#endif /* __FSL_DPSW_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
new file mode 100644
index 000000000000..6352d6d1ecba
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c
@@ -0,0 +1,1581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpsw.h"
+#include "dpsw-cmd.h"
+
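+/* Build the little-endian interface bitmap used by commands that operate on
+ * a set of interfaces (VLAN/FDB membership, egress flooding).
+ */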
+static void build_if_id_bitmap(__le64 *bmap, const u16 *id, const u16 num_ifs)
+{
+ int i;
+
+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
+ if (id[i] < DPSW_MAX_IF)
+ bmap[id[i] / 64] |= cpu_to_le64(BIT_MASK(id[i] % 64));
+ }
+}
+
+/**
+ * dpsw_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpsw_id: DPSW unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpsw_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpsw_cmd_open *)cmd.params;
+ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpsw_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_enable() - Enable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_disable() - Disable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:		Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will
+ * trigger an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
+ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:		Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_get_irq_status *cmd_params;
+ struct dpsw_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:		Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_attributes() - Retrieve DPSW attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned DPSW attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpsw_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+ attr->max_fdbs = rsp_params->max_fdbs;
+ attr->num_fdbs = rsp_params->num_fdbs;
+ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
+ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
+ attr->id = le32_to_cpu(rsp_params->dpsw_id);
+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
+ attr->max_meters_per_if = rsp_params->max_meters_per_if;
+ attr->options = le64_to_cpu(rsp_params->options);
+ attr->component_type = dpsw_get_field(rsp_params->component_type, COMPONENT_TYPE);
+ attr->flooding_cfg = dpsw_get_field(rsp_params->repl_cfg, FLOODING_CFG);
+ attr->broadcast_cfg = dpsw_get_field(rsp_params->repl_cfg, BROADCAST_CFG);
+ return 0;
+}
+
+/**
+ * dpsw_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @state:	Link state: 1 - link up, 0 - link down or disconnected
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_link_state *cmd_params;
+ struct dpsw_rsp_if_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+ state->up = dpsw_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_tci_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_tci *cmd_params;
+ u16 tmp_conf = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(tmp_conf, VLAN_ID, cfg->vlan_id);
+ dpsw_set_field(tmp_conf, DEI, cfg->dei);
+ dpsw_set_field(tmp_conf, PCP, cfg->pcp);
+ cmd_params->conf = cpu_to_le16(tmp_conf);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_tci() - Get default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_tci_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_tci *cmd_params;
+ struct dpsw_rsp_if_get_tci *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_tci *)cmd.params;
+ cfg->pcp = rsp_params->pcp;
+ cfg->dei = rsp_params->dei;
+ cfg->vlan_id = le16_to_cpu(rsp_params->vlan_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_stp() - Set Spanning Tree Protocol (STP) state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: STP State configuration parameters
+ *
+ * The following STP states are supported -
+ * blocking, listening, learning, forwarding and disabled.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_stp_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_stp *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->state, STATE, cfg->state);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_counter() - Get specific counter of particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @type: Counter type
+ * @counter: return value
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_counter *cmd_params;
+ struct dpsw_rsp_if_get_counter *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
+ *counter = le64_to_cpu(rsp_params->counter);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_attributes() - Obtain the attributes of an interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @attr: Returned interface attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr)
+{
+ struct dpsw_rsp_if_get_attr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_ATTR, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_if_get_attr *)cmd.params;
+ attr->num_tcs = rsp_params->num_tcs;
+ attr->rate = le32_to_cpu(rsp_params->rate);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->qdid = le16_to_cpu(rsp_params->qdid);
+ attr->enabled = dpsw_get_field(rsp_params->conf, ENABLED);
+ attr->accept_all_vlan = dpsw_get_field(rsp_params->conf,
+ ACCEPT_ALL_VLAN);
+ attr->admit_untagged = dpsw_get_field(rsp_params->conf,
+ ADMIT_UNTAGGED);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @frame_length: Maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->frame_length = cpu_to_le16(frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add() - Add a new VLAN to the DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: VLAN configuration
+ *
+ * Only VLAN ID and FDB ID are required parameters here.
+ * 12 bit VLAN ID is defined in IEEE802.1Q.
+ * Adding a duplicate VLAN ID is not allowed.
+ * FDB ID can be shared across multiple VLANs. Shared learning
+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
+ * with the same fdb_id.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_vlan_add *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_vlan_add *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if() - Add a set of interfaces to an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces to add
+ *
+ * Only interfaces that do not yet belong to this VLAN can be added;
+ * otherwise an error is generated and the entire command is
+ * ignored. This function can be called numerous times, each time
+ * providing only the delta of interfaces to be added.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct dpsw_cmd_vlan_add_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_add_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ cmd_params->options = cpu_to_le16(cfg->options);
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
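Taken together, dpsw_vlan_add() and dpsw_vlan_add_if() are meant to be called in
sequence: first create the VLAN bound to an FDB, then add its member interfaces.
A hypothetical sketch (fdb_id, mc_io, token and err assumed valid, VLAN 100 and
the interface list chosen arbitrarily):

    struct dpsw_vlan_cfg vcfg = {
            .fdb_id = fdb_id,               /* FDB backing this VLAN */
    };
    struct dpsw_vlan_if_cfg vifs = {
            .num_ifs = 2,
            .if_id = { 0, 1 },              /* egress list for the VLAN */
    };

    err = dpsw_vlan_add(mc_io, 0, token, 100, &vcfg);
    if (!err)
            err = dpsw_vlan_add_if(mc_io, 0, token, 100, &vifs);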
+
+/**
+ * dpsw_vlan_add_if_untagged() - Define a set of interfaces that should be
+ * transmitted as untagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
+ * These interfaces should already belong to this VLAN.
+ * By default all interfaces are transmitted as tagged.
+ * Providing a non-existent interface, or an interface that is
+ * already configured as untagged, generates an error and the entire
+ * command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces must belong to this VLAN; otherwise an error
+ * is returned and the entire command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
+ * converted from being transmitted as untagged to being transmitted as tagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces provided to this API have to belong to this VLAN and
+ * be configured as untagged; otherwise an error is returned and the
+ * command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_remove *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add() - Add an FDB to the switch and return a handle to the FDB
+ * table for future reference
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Returned Forwarding Database Identifier
+ * @cfg: FDB Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg)
+{
+ struct dpsw_cmd_fdb_add *cmd_params;
+ struct dpsw_rsp_fdb_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_add *)cmd.params;
+ cmd_params->fdb_ageing_time = cpu_to_le16(cfg->fdb_ageing_time);
+ cmd_params->num_fdb_entries = cpu_to_le16(cfg->num_fdb_entries);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_add *)cmd.params;
+ *fdb_id = le16_to_cpu(rsp_params->fdb_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_fdb_remove() - Remove FDB from switch
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id)
+{
+ struct dpsw_cmd_fdb_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_remove *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_unicast() - Add a unicast entry to the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
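The byte-reversal loop above converts the address into the command's MAC byte
order, so callers pass it in the usual order. A hedged example (mc_io, token,
fdb_id and err are assumptions, and the MAC value is arbitrary):

    struct dpsw_fdb_unicast_cfg ucfg = {
            .type = DPSW_FDB_ENTRY_STATIC,
            .mac_addr = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 },
            .if_egress = 3,                 /* switch port the entry points to */
    };

    err = dpsw_fdb_add_unicast(mc_io, 0, token, fdb_id, &ucfg);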
+
+/**
+ * dpsw_fdb_dump() - Dump the content of FDB table into memory.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @iova_addr: Data will be stored here as an array of struct fdb_dump_entry
+ * @iova_size: Memory size allocated at iova_addr
+ * @num_entries: Number of entries written at iova_addr
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ *
+ * The memory allocated at iova_addr must be initialized with zero before
+ * command execution. If the FDB table does not fit into memory, the MC will stop
+ * after the memory is filled up.
+ * The struct fdb_dump_entry array must be parsed until the end of memory
+ * area or until an entry with mac_addr set to zero is found.
+ */
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries)
+{
+ struct dpsw_cmd_fdb_dump *cmd_params;
+ struct dpsw_rsp_fdb_dump *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_DUMP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_dump *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->iova_addr = cpu_to_le64(iova_addr);
+ cmd_params->iova_size = cpu_to_le32(iova_size);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_fdb_dump *)cmd.params;
+ *num_entries = le16_to_cpu(rsp_params->num_entries);
+
+ return 0;
+}
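Following the termination rules described above, a caller could walk the dumped
entries as in this sketch; dump_virt is a hypothetical CPU mapping of the buffer
at iova_addr, and is_zero_ether_addr() comes from <linux/etherdevice.h>:

    struct fdb_dump_entry *entry = dump_virt;
    u16 i;

    for (i = 0; i < num_entries; i++, entry++) {
            if (is_zero_ether_addr(entry->mac_addr))
                    break;                  /* rest of the buffer is unused */
            /* consume entry->type / entry->if_info / entry->if_mask here */
    }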
+
+/**
+ * dpsw_fdb_remove_unicast() - Remove an entry from the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * If the group does not exist, it will be created.
+ * Only interfaces that do not yet belong to this multicast group
+ * can be added; otherwise an error is generated and the command is
+ * ignored.
+ * This function may be called numerous times, each time providing
+ * only the delta of interfaces to be added.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_remove_multicast() - Remove interfaces from an existing multicast
+ * group.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * Interfaces provided to this API have to exist in the group;
+ * otherwise an error is returned and the entire command is
+ * ignored. If no interface is left in the group,
+ * the entire group is deleted.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_get_attributes() - Obtain control interface attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned control interface attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr)
+{
+ struct dpsw_rsp_ctrl_if_get_attr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_GET_ATTR,
+ cmd_flags, token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_ctrl_if_get_attr *)cmd.params;
+ attr->rx_fqid = le32_to_cpu(rsp_params->rx_fqid);
+ attr->rx_err_fqid = le32_to_cpu(rsp_params->rx_err_fqid);
+ attr->tx_err_conf_fqid = le32_to_cpu(rsp_params->tx_err_conf_fqid);
+
+ return 0;
+}
+
+/**
+ * dpsw_ctrl_if_set_pools() - Set control interface buffer pools
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Buffer pools configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_pools *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int i;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_POOLS,
+ cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPSW_MAX_DPBP; i++) {
+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPSW_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_set_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of dpsw object
+ * @qtype: dpsw_queue_type of the targeted queue
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg)
+{
+ struct dpsw_cmd_ctrl_if_set_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_SET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_ctrl_if_set_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->qtype = qtype;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpsw_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ return mc_send_command(mc_io, &cmd);
+}
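A plausible control-interface bring-up order, sketched here with placeholder
dpbp_id and dpio_id values (not taken from this patch), is: set the buffer pools,
point the Rx queue at a DPIO, then enable the interface:

    struct dpsw_ctrl_if_pools_cfg pools_cfg = {
            .num_dpbp = 1,
            .pools[0] = {
                    .dpbp_id = dpbp_id,
                    .buffer_size = 2048,
                    .backup_pool = 0,
            },
    };
    struct dpsw_ctrl_if_queue_cfg qcfg = {
            .options = DPSW_CTRL_IF_QUEUE_OPT_DEST,
            .dest_cfg = {
                    .dest_type = DPSW_CTRL_IF_DEST_DPIO,
                    .dest_id = dpio_id,
                    .priority = 0,
            },
    };

    err = dpsw_ctrl_if_set_pools(mc_io, 0, token, &pools_cfg);
    if (!err)
            err = dpsw_ctrl_if_set_queue(mc_io, 0, token, DPSW_QUEUE_RX, &qcfg);
    if (!err)
            err = dpsw_ctrl_if_enable(mc_io, 0, token);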
+
+/**
+ * dpsw_get_api_version() - Get Data Path Switch API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path switch API
+ * @minor_ver: Minor version of data path switch API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->version_major);
+ *minor_ver = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
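Drivers typically gate themselves on a minimum firmware API version right after
opening the object; DPSW_MIN_VER_MAJOR/MINOR below stand in for whatever minimum
a driver requires and are not values taken from this patch:

    u16 major, minor;

    err = dpsw_get_api_version(mc_io, 0, &major, &minor);
    if (!err && (major < DPSW_MIN_VER_MAJOR ||
                 (major == DPSW_MIN_VER_MAJOR && minor < DPSW_MIN_VER_MINOR)))
            err = -EOPNOTSUPP;              /* firmware too old for this driver */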
+
+/**
+ * dpsw_if_get_port_mac_addr() - Retrieve the MAC address associated with the physical port
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u8 mac_addr[6])
+{
+ struct dpsw_rsp_if_get_mac_addr *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpsw_ctrl_if_enable() - Enable control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_ENABLE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_ctrl_if_disable() - Disable the control interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CTRL_IF_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_egress_flood() - Set egress parameters associated with an FDB ID
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @cfg: Egress flooding configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg)
+{
+ struct dpsw_cmd_set_egress_flood *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_EGRESS_FLOOD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_set_egress_flood *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->flood_type = cfg->flood_type;
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_set_learning_mode() - Configure the learning mode on an interface.
+ * If this API is used, it will take precedence over the FDB configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface ID
+ * @mode: Learning mode
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode)
+{
+ struct dpsw_cmd_if_set_learning_mode *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LEARNING_MODE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_learning_mode *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add() - Create an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: Returned ACL ID, for future references
+ * @cfg: ACL configuration
+ *
+ * Create an Access Control List table. Multiple ACLs can be created and
+ * co-exist in the L2 switch.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg)
+{
+ struct dpsw_cmd_acl_add *cmd_params;
+ struct dpsw_rsp_acl_add *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD, cmd_flags, token);
+ cmd_params = (struct dpsw_cmd_acl_add *)cmd.params;
+ cmd_params->max_entries = cpu_to_le16(cfg->max_entries);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_acl_add *)cmd.params;
+ *acl_id = le16_to_cpu(rsp_params->acl_id);
+
+ return 0;
+}
+
+/**
+ * dpsw_acl_remove() - Remove an ACL table from L2 switch.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id)
+{
+ struct dpsw_cmd_acl_remove *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_remove *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_add_if() - Associate interface/interfaces with an ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
+{
+ struct dpsw_cmd_acl_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_remove_if() - Disassociate one or more interfaces from an ACL table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Interfaces list
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg)
+{
+ struct dpsw_cmd_acl_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_IF, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_if *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ build_if_id_bitmap(&cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_acl_prepare_entry_cfg() - Set up an ACL entry
+ * @key: Key
+ * @entry_cfg_buf: 256 bytes of memory, zeroed before being mapped for DMA
+ *
+ * This function has to be called before adding or removing an ACL entry.
+ *
+ */
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf)
+{
+ struct dpsw_prep_acl_entry *ext_params;
+ int i;
+
+ ext_params = (struct dpsw_prep_acl_entry *)entry_cfg_buf;
+
+ for (i = 0; i < 6; i++) {
+ ext_params->match_l2_dest_mac[i] = key->match.l2_dest_mac[5 - i];
+ ext_params->match_l2_source_mac[i] = key->match.l2_source_mac[5 - i];
+ ext_params->mask_l2_dest_mac[i] = key->mask.l2_dest_mac[5 - i];
+ ext_params->mask_l2_source_mac[i] = key->mask.l2_source_mac[5 - i];
+ }
+
+ ext_params->match_l2_tpid = cpu_to_le16(key->match.l2_tpid);
+ ext_params->match_l2_vlan_id = cpu_to_le16(key->match.l2_vlan_id);
+ ext_params->match_l3_dest_ip = cpu_to_le32(key->match.l3_dest_ip);
+ ext_params->match_l3_source_ip = cpu_to_le32(key->match.l3_source_ip);
+ ext_params->match_l4_dest_port = cpu_to_le16(key->match.l4_dest_port);
+ ext_params->match_l4_source_port = cpu_to_le16(key->match.l4_source_port);
+ ext_params->match_l2_ether_type = cpu_to_le16(key->match.l2_ether_type);
+ ext_params->match_l2_pcp_dei = key->match.l2_pcp_dei;
+ ext_params->match_l3_dscp = key->match.l3_dscp;
+
+ ext_params->mask_l2_tpid = cpu_to_le16(key->mask.l2_tpid);
+ ext_params->mask_l2_vlan_id = cpu_to_le16(key->mask.l2_vlan_id);
+ ext_params->mask_l3_dest_ip = cpu_to_le32(key->mask.l3_dest_ip);
+ ext_params->mask_l3_source_ip = cpu_to_le32(key->mask.l3_source_ip);
+ ext_params->mask_l4_dest_port = cpu_to_le16(key->mask.l4_dest_port);
+ ext_params->mask_l4_source_port = cpu_to_le16(key->mask.l4_source_port);
+ ext_params->mask_l2_ether_type = cpu_to_le16(key->mask.l2_ether_type);
+ ext_params->mask_l2_pcp_dei = key->mask.l2_pcp_dei;
+ ext_params->mask_l3_dscp = key->mask.l3_dscp;
+ ext_params->match_l3_protocol = key->match.l3_protocol;
+ ext_params->mask_l3_protocol = key->mask.l3_protocol;
+}
+
+/**
+ * dpsw_acl_add_entry() - Add a rule to the ACL table.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_ADD_ENTRY, cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
+
+ return mc_send_command(mc_io, &cmd);
+}
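The prepare/add pair is used together: dpsw_acl_prepare_entry_cfg() serializes
the key into the zeroed 256-byte buffer, which is then DMA-mapped and referenced
through key_iova. A sketch in which key, acl_buf, key_iova, acl_id, mc_io, token
and err are assumptions and the DMA mapping itself is elided:

    dpsw_acl_prepare_entry_cfg(&key, acl_buf);      /* fill the 256-byte buffer */

    struct dpsw_acl_entry_cfg ecfg = {
            .precedence = 0,
            .key_iova = key_iova,           /* DMA address of acl_buf */
            .result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF,
    };

    err = dpsw_acl_add_entry(mc_io, 0, token, acl_id, &ecfg);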
+
+/**
+ * dpsw_acl_remove_entry() - Remove an entry from the ACL.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @acl_id: ACL ID
+ * @cfg: Entry configuration
+ *
+ * warning: This function has to be called after dpsw_acl_prepare_entry_cfg()
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg)
+{
+ struct dpsw_cmd_acl_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ACL_REMOVE_ENTRY,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params;
+ cmd_params->acl_id = cpu_to_le16(acl_id);
+ cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id);
+ cmd_params->precedence = cpu_to_le32(cfg->precedence);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ dpsw_set_field(cmd_params->result_action,
+ RESULT_ACTION,
+ cfg->result.action);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
new file mode 100644
index 000000000000..5ef221a25b02
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h
@@ -0,0 +1,755 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2021 NXP
+ *
+ */
+
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+/* Data Path L2-Switch API
+ * Contains API for handling DPSW topology and functionality
+ */
+
+struct fsl_mc_io;
+
+/* DPSW general definitions */
+
+#define DPSW_MAX_PRIORITIES 8
+
+#define DPSW_MAX_IF 64
+
+int dpsw_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpsw_id, u16 *token);
+
+int dpsw_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW options */
+
+/**
+ * DPSW_OPT_FLOODING_DIS - Flooding was disabled at device create
+ */
+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
+/**
+ * DPSW_OPT_MULTICAST_DIS - Multicast was disabled at device create
+ */
+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
+/**
+ * DPSW_OPT_CTRL_IF_DIS - Control interface support is disabled
+ */
+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
+
+/**
+ * enum dpsw_component_type - component type of a bridge
+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
+ * enterprise VLAN bridge or of a Provider Bridge used
+ * to process C-tagged frames
+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
+ * Provider Bridge
+ *
+ */
+enum dpsw_component_type {
+ DPSW_COMPONENT_TYPE_C_VLAN = 0,
+ DPSW_COMPONENT_TYPE_S_VLAN
+};
+
+/**
+ * enum dpsw_flooding_cfg - flooding configuration requested
+ * @DPSW_FLOODING_PER_VLAN: Flooding replicators are allocated per VLAN and
+ * interfaces present in each of them can be configured using
+ * dpsw_vlan_add_if_flooding()/dpsw_vlan_remove_if_flooding().
+ * This is the default configuration.
+ *
+ * @DPSW_FLOODING_PER_FDB: Flooding replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_flooding_cfg {
+ DPSW_FLOODING_PER_VLAN = 0,
+ DPSW_FLOODING_PER_FDB,
+};
+
+/**
+ * enum dpsw_broadcast_cfg - broadcast configuration requested
+ * @DPSW_BROADCAST_PER_OBJECT: There is only one broadcast replicator per DPSW
+ * object. This is the default configuration.
+ * @DPSW_BROADCAST_PER_FDB: Broadcast replicators are allocated per FDB and
+ * interfaces present in each of them can be configured using
+ * dpsw_set_egress_flood().
+ */
+enum dpsw_broadcast_cfg {
+ DPSW_BROADCAST_PER_OBJECT = 0,
+ DPSW_BROADCAST_PER_FDB,
+};
+
+int dpsw_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/* DPSW IRQ Index and Events */
+
+#define DPSW_IRQ_INDEX_IF 0x0000
+#define DPSW_IRQ_INDEX_L2SW 0x0001
+
+/**
+ * DPSW_IRQ_EVENT_LINK_CHANGED - Indicates that the link state changed
+ */
+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
+
+/**
+ * struct dpsw_irq_cfg - IRQ configuration
+ * @addr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dpsw_irq_cfg {
+ u64 addr;
+ u32 val;
+ int irq_num;
+};
+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en);
+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask);
+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status);
+
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status);
+
+/**
+ * struct dpsw_attr - Structure representing DPSW attributes
+ * @id: DPSW object ID
+ * @options: Enable/Disable DPSW features
+ * @max_vlans: Maximum Number of VLANs
+ * @max_meters_per_if: Number of meters per interface
+ * @max_fdbs: Maximum Number of FDBs
+ * @max_fdb_entries: Number of FDB entries for default FDB table;
+ * 0 - indicates default 1024 entries.
+ * @fdb_aging_time: Default FDB aging time for default FDB table;
+ * 0 - indicates default 300 seconds
+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
+ * 0 - indicates default 32
+ * @mem_size: DPSW frame storage memory size
+ * @num_ifs: Number of interfaces
+ * @num_vlans: Current number of VLANs
+ * @num_fdbs: Current number of FDBs
+ * @component_type: Component type of this bridge
+ * @flooding_cfg: Flooding configuration (PER_VLAN - default, PER_FDB)
+ * @broadcast_cfg: Broadcast configuration (PER_OBJECT - default, PER_FDB)
+ */
+struct dpsw_attr {
+ int id;
+ u64 options;
+ u16 max_vlans;
+ u8 max_meters_per_if;
+ u8 max_fdbs;
+ u16 max_fdb_entries;
+ u16 fdb_aging_time;
+ u16 max_fdb_mc_groups;
+ u16 num_ifs;
+ u16 mem_size;
+ u16 num_vlans;
+ u8 num_fdbs;
+ enum dpsw_component_type component_type;
+ enum dpsw_flooding_cfg flooding_cfg;
+ enum dpsw_broadcast_cfg broadcast_cfg;
+};
+
+int dpsw_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpsw_attr *attr);
+
+/**
+ * struct dpsw_ctrl_if_attr - Control interface attributes
+ * @rx_fqid: Receive FQID
+ * @rx_err_fqid: Receive error FQID
+ * @tx_err_conf_fqid: Transmit error and confirmation FQID
+ */
+struct dpsw_ctrl_if_attr {
+ u32 rx_fqid;
+ u32 rx_err_fqid;
+ u32 tx_err_conf_fqid;
+};
+
+int dpsw_ctrl_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpsw_ctrl_if_attr *attr);
+
+enum dpsw_queue_type {
+ DPSW_QUEUE_RX,
+ DPSW_QUEUE_TX_ERR_CONF,
+ DPSW_QUEUE_RX_ERR,
+};
+
+#define DPSW_MAX_DPBP 8
+
+/**
+ * struct dpsw_ctrl_if_pools_cfg - Control interface buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ * @pools.dpbp_id: DPBP object ID
+ * @pools.buffer_size: Buffer size
+ * @pools.backup_pool: Backup pool
+ */
+struct dpsw_ctrl_if_pools_cfg {
+ u8 num_dpbp;
+ struct {
+ int dpbp_id;
+ u16 buffer_size;
+ int backup_pool;
+ } pools[DPSW_MAX_DPBP];
+};
+
+int dpsw_ctrl_if_set_pools(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_ctrl_if_pools_cfg *cfg);
+
+#define DPSW_CTRL_IF_QUEUE_OPT_USER_CTX 0x00000001
+#define DPSW_CTRL_IF_QUEUE_OPT_DEST 0x00000002
+
+enum dpsw_ctrl_if_dest {
+ DPSW_CTRL_IF_DEST_NONE = 0,
+ DPSW_CTRL_IF_DEST_DPIO = 1,
+};
+
+struct dpsw_ctrl_if_dest_cfg {
+ enum dpsw_ctrl_if_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+struct dpsw_ctrl_if_queue_cfg {
+ u32 options;
+ u64 user_ctx;
+ struct dpsw_ctrl_if_dest_cfg dest_cfg;
+};
+
+int dpsw_ctrl_if_set_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpsw_queue_type qtype,
+ const struct dpsw_ctrl_if_queue_cfg *cfg);
+
+int dpsw_ctrl_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpsw_ctrl_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
+ */
+enum dpsw_action {
+ DPSW_ACTION_DROP = 0,
+ DPSW_ACTION_REDIRECT = 1
+};
+
+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpsw_link_cfg - Structure representing DPSW link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ */
+struct dpsw_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_link_cfg *cfg);
+
+/**
+ * struct dpsw_link_state - Structure representing DPSW link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ * @up: 0 - covers two cases: down and disconnected, 1 - up
+ */
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
+ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_link_state *state);
+
+/**
+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
+ * separately or in conjunction with PCP to indicate frames
+ * eligible to be dropped in the presence of congestion
+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
+ * to which the frame belongs. The hexadecimal values
+ * of 0x000 and 0xFFF are reserved;
+ * all other values may be used as VLAN identifiers,
+ * allowing up to 4,094 VLANs
+ */
+struct dpsw_tci_cfg {
+ u8 pcp;
+ u8 dei;
+ u16 vlan_id;
+};
+
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_tci_cfg *cfg);
+
+int dpsw_if_get_tci(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ struct dpsw_tci_cfg *cfg);
+
+/**
+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
+ * @DPSW_STP_STATE_DISABLED: Disabled state
+ * @DPSW_STP_STATE_LISTENING: Listening state
+ * @DPSW_STP_STATE_LEARNING: Learning state
+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
+ * @DPSW_STP_STATE_BLOCKING: Blocking state
+ *
+ */
+enum dpsw_stp_state {
+ DPSW_STP_STATE_DISABLED = 0,
+ DPSW_STP_STATE_LISTENING = 1,
+ DPSW_STP_STATE_LEARNING = 2,
+ DPSW_STP_STATE_FORWARDING = 3,
+ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
+ * @vlan_id: VLAN ID STP state
+ * @state: STP state
+ */
+struct dpsw_stp_cfg {
+ u16 vlan_id;
+ enum dpsw_stp_state state;
+};
+
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id,
+ const struct dpsw_stp_cfg *cfg);
+
+/**
+ * enum dpsw_accepted_frames - Types of frames to accept
+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
+ * priority tagged frames
+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
+ * Priority-Tagged frames received on this interface.
+ *
+ */
+enum dpsw_accepted_frames {
+ DPSW_ADMIT_ALL = 1,
+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
+};
+
+/**
+ * enum dpsw_counter - Counters types
+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
+ * @DPSW_CNT_ING_NO_BUFF_DISCARD: Counts ingress no buffer discarded frames
+ */
+enum dpsw_counter {
+ DPSW_CNT_ING_FRAME = 0x0,
+ DPSW_CNT_ING_BYTE = 0x1,
+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
+ DPSW_CNT_EGR_FRAME = 0x8,
+ DPSW_CNT_EGR_BYTE = 0x9,
+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb,
+ DPSW_CNT_ING_NO_BUFF_DISCARD = 0xc,
+};
+
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_counter type, u64 *counter);
+
+int dpsw_if_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+int dpsw_if_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 if_id);
+
+/**
+ * struct dpsw_if_attr - Structure representing DPSW interface attributes
+ * @num_tcs: Number of traffic classes
+ * @rate: Transmit rate in bits per second
+ * @options: Interface configuration options (bitmap)
+ * @enabled: Indicates if interface is enabled
+ * @accept_all_vlan: The device discards/accepts incoming frames
+ * for VLANs that do not include this interface
+ * @admit_untagged: When set to 'DPSW_ADMIT_ONLY_VLAN_TAGGED', the device
+ * discards untagged frames or priority-tagged frames received on
+ * this interface;
+ * When set to 'DPSW_ADMIT_ALL', untagged frames or priority-
+ * tagged frames received on this interface are accepted
+ * @qdid: control frames transmit qdid
+ */
+struct dpsw_if_attr {
+ u8 num_tcs;
+ u32 rate;
+ u32 options;
+ int enabled;
+ int accept_all_vlan;
+ enum dpsw_accepted_frames admit_untagged;
+ u16 qdid;
+};
+
+int dpsw_if_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, struct dpsw_if_attr *attr);
+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u16 frame_length);
+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Database ID
+ */
+struct dpsw_vlan_cfg {
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_cfg *cfg);
+
+#define DPSW_VLAN_ADD_IF_OPT_FDB_ID 0x0001
+
+/**
+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
+ * @num_ifs: The number of interfaces that are assigned to the egress
+ * list for this VLAN
+ * @if_id: The set of interfaces that are
+ * assigned to the egress list for this VLAN
+ * @options: Options map for this command (DPSW_VLAN_ADD_IF_OPT_FDB_ID)
+ * @fdb_id: FDB id to be used by this VLAN on these specific interfaces
+ * (taken into account only if the DPSW_VLAN_ADD_IF_OPT_FDB_ID is
+ * specified in the options field)
+ */
+struct dpsw_vlan_if_cfg {
+ u16 num_ifs;
+ u16 options;
+ u16 if_id[DPSW_MAX_IF];
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
+ */
+enum dpsw_fdb_entry_type {
+ DPSW_FDB_ENTRY_STATIC = 0,
+ DPSW_FDB_ENTRY_DINAMIC = 1
+};
+
+/**
+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @if_egress: Egress interface ID
+ */
+struct dpsw_fdb_unicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 if_egress;
+};
+
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_unicast_cfg *cfg);
+
+#define DPSW_FDB_ENTRY_TYPE_DYNAMIC BIT(0)
+#define DPSW_FDB_ENTRY_TYPE_UNICAST BIT(1)
+
+/**
+ * struct fdb_dump_entry - fdb snapshot entry
+ * @mac_addr: MAC address
+ * @type: bit0 - DINAMIC(1)/STATIC(0), bit1 - UNICAST(1)/MULTICAST(0)
+ * @if_info: unicast - egress interface, multicast - number of egress interfaces
+ * @if_mask: multicast - egress interface mask
+ */
+struct fdb_dump_entry {
+ u8 mac_addr[6];
+ u8 type;
+ u8 if_info;
+ u8 if_mask[8];
+};
+
+int dpsw_fdb_dump(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id,
+ u64 iova_addr, u32 iova_size, u16 *num_entries);
+
+/**
+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @num_ifs: Number of external and internal interfaces
+ * @if_id: Egress interface IDs
+ */
+struct dpsw_fdb_multicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 fdb_id, const struct dpsw_fdb_multicast_cfg *cfg);
+
+/**
+ * enum dpsw_learning_mode - Auto-learning modes
+ * @DPSW_LEARNING_MODE_DIS: Disable Auto-learning
+ * @DPSW_LEARNING_MODE_HW: Enable HW auto-Learning
+ * @DPSW_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
+ * @DPSW_LEARNING_MODE_SECURE: Enable secure learning by CPU
+ *
+ * NON-SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. DMAC destination
+ * 2. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Flooding list of interfaces
+ * 2. Control interface
+ * SECURE LEARNING
+ * SMAC found DMAC found CTLU Action
+ * v v Forward frame to
+ * 1. DMAC destination
+ * - v Forward frame to
+ * 1. Control interface
+ * v - Forward frame to
+ * 1. Flooding list of interfaces
+ * - - Forward frame to
+ * 1. Control interface
+ */
+enum dpsw_learning_mode {
+ DPSW_LEARNING_MODE_DIS = 0,
+ DPSW_LEARNING_MODE_HW = 1,
+ DPSW_LEARNING_MODE_NON_SECURE = 2,
+ DPSW_LEARNING_MODE_SECURE = 3
+};
+
+/**
+ * struct dpsw_fdb_attr - FDB Attributes
+ * @max_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ * @learning_mode: Learning mode
+ * @num_fdb_mc_groups: Current number of multicast groups
+ * @max_fdb_mc_groups: Maximum number of multicast groups
+ */
+struct dpsw_fdb_attr {
+ u16 max_fdb_entries;
+ u16 fdb_ageing_time;
+ enum dpsw_learning_mode learning_mode;
+ u16 num_fdb_mc_groups;
+ u16 max_fdb_mc_groups;
+};
+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpsw_if_get_port_mac_addr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, u8 mac_addr[6]);
+
+/**
+ * struct dpsw_fdb_cfg - FDB Configuration
+ * @num_fdb_entries: Number of FDB entries
+ * @fdb_ageing_time: Ageing time in seconds
+ */
+struct dpsw_fdb_cfg {
+ u16 num_fdb_entries;
+ u16 fdb_ageing_time;
+};
+
+int dpsw_fdb_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *fdb_id,
+ const struct dpsw_fdb_cfg *cfg);
+
+int dpsw_fdb_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 fdb_id);
+
+/**
+ * enum dpsw_flood_type - Define the flood type of a DPSW object
+ * @DPSW_BROADCAST: Broadcast flooding
+ * @DPSW_FLOODING: Unknown flooding
+ */
+enum dpsw_flood_type {
+ DPSW_BROADCAST = 0,
+ DPSW_FLOODING,
+};
+
+struct dpsw_egress_flood_cfg {
+ u16 fdb_id;
+ enum dpsw_flood_type flood_type;
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_set_egress_flood(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ const struct dpsw_egress_flood_cfg *cfg);
+
+int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 if_id, enum dpsw_learning_mode mode);
+
+/**
+ * struct dpsw_acl_cfg - ACL Configuration
+ * @max_entries: Number of ACL rules
+ */
+struct dpsw_acl_cfg {
+ u16 max_entries;
+};
+
+int dpsw_acl_add(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 *acl_id,
+ const struct dpsw_acl_cfg *cfg);
+
+int dpsw_acl_remove(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id);
+
+/**
+ * struct dpsw_acl_if_cfg - List of interfaces to associate with an ACL table
+ * @num_ifs: Number of interfaces
+ * @if_id: List of interfaces
+ */
+struct dpsw_acl_if_cfg {
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_acl_add_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+int dpsw_acl_remove_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_if_cfg *cfg);
+
+/**
+ * struct dpsw_acl_fields - ACL fields.
+ * @l2_dest_mac: Destination MAC address: BPDU, Multicast, Broadcast, Unicast,
+ * slow protocols, MVRP, STP
+ * @l2_source_mac: Source MAC address
+ * @l2_tpid: Layer 2 (Ethernet) protocol type, used to identify the following
+ * protocols: MPLS, PTP, PFC, ARP, Jumbo frames, LLDP, IEEE802.1ae,
+ * Q-in-Q, IPv4, IPv6, PPPoE
+ * @l2_pcp_dei: Priority Code Point (PCP) and Drop Eligible Indicator (DEI)
+ *	fields of the VLAN tag
+ * @l2_vlan_id: layer 2 VLAN ID
+ * @l2_ether_type: layer 2 Ethernet type
+ * @l3_dscp: Layer 3 differentiated services code point
+ * @l3_protocol: Tells the Network layer at the destination host, to which
+ * Protocol this packet belongs to. The following protocol are
+ * supported: ICMP, IGMP, IPv4 (encapsulation), TCP, IPv6
+ * (encapsulation), GRE, PTP
+ * @l3_source_ip: Source IPv4 IP
+ * @l3_dest_ip: Destination IPv4 IP
+ * @l4_source_port: Source TCP/UDP Port
+ * @l4_dest_port: Destination TCP/UDP Port
+ */
+struct dpsw_acl_fields {
+ u8 l2_dest_mac[6];
+ u8 l2_source_mac[6];
+ u16 l2_tpid;
+ u8 l2_pcp_dei;
+ u16 l2_vlan_id;
+ u16 l2_ether_type;
+ u8 l3_dscp;
+ u8 l3_protocol;
+ u32 l3_source_ip;
+ u32 l3_dest_ip;
+ u16 l4_source_port;
+ u16 l4_dest_port;
+};
+
+/**
+ * struct dpsw_acl_key - ACL key
+ * @match: Match fields
+ * @mask: Mask: b'1 - valid, b'0 don't care
+ */
+struct dpsw_acl_key {
+ struct dpsw_acl_fields match;
+ struct dpsw_acl_fields mask;
+};
+
+/**
+ * enum dpsw_acl_action - action to be run on the ACL rule match
+ * @DPSW_ACL_ACTION_DROP: Drop frame
+ * @DPSW_ACL_ACTION_REDIRECT: Redirect to certain port
+ * @DPSW_ACL_ACTION_ACCEPT: Accept frame
+ * @DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF: Redirect to control interface
+ */
+enum dpsw_acl_action {
+ DPSW_ACL_ACTION_DROP,
+ DPSW_ACL_ACTION_REDIRECT,
+ DPSW_ACL_ACTION_ACCEPT,
+ DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF
+};
+
+/**
+ * struct dpsw_acl_result - ACL action
+ * @action: Action to be taken when an ACL entry is hit
+ * @if_id: Interface ID to which the frame is redirected. Valid only if a
+ * redirect action is selected
+ */
+struct dpsw_acl_result {
+ enum dpsw_acl_action action;
+ u16 if_id;
+};
+
+/**
+ * struct dpsw_acl_entry_cfg - ACL entry
+ * @key_iova: I/O virtual address of DMA-able memory filled with key after call
+ * to dpsw_acl_prepare_entry_cfg()
+ * @result: Required action when entry hit occurs
+ * @precedence: Precedence inside the ACL; 0 is the lowest. This priority cannot
+ * change during the lifetime of a policy. It is the user's responsibility to
+ * space the priorities to allow for subsequent rule additions.
+ */
+struct dpsw_acl_entry_cfg {
+ u64 key_iova;
+ struct dpsw_acl_result result;
+ int precedence;
+};
+
+void dpsw_acl_prepare_entry_cfg(const struct dpsw_acl_key *key,
+ u8 *entry_cfg_buf);
+
+int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+
+int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
+#endif /* __FSL_DPSW_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index ab92382c399a..cdc0ff89388a 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI
+ select FSL_ENETC_IERB
select FSL_ENETC_MDIO
select PHYLINK
select PCS_LYNX
@@ -25,6 +26,14 @@ config FSL_ENETC_VF
If compiled as module (M), the module name is fsl-enetc-vf.
+config FSL_ENETC_IERB
+ tristate "ENETC IERB driver"
+ help
+ This driver configures the Integrated Endpoint Register Block on NXP
+ LS1028A.
+
+ If compiled as module (M), the module name is fsl-enetc-ierb.
+
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
depends on PCI && MDIO_DEVRES && MDIO_BUS
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 74f7ac253b8b..a139f2e9d59f 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -11,6 +11,9 @@ obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
+fsl-enetc-ierb-y := enetc_ierb.o
+
obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 09471329f3a3..3ca93adb9662 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2,88 +2,138 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
+#include <linux/bpf_trace.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>
+#include <linux/ptp_classify.h>
+#include <net/pkt_sched.h>
-/* ENETC overhead: optional extension BD + 1 BD gap */
-#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
-#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
-
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- int active_offloads);
-
-netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_bdr *tx_ring;
- int count;
+ int num_tx_rings = priv->num_tx_rings;
+ int i;
- tx_ring = priv->tx_ring[skb->queue_mapping];
+ for (i = 0; i < priv->num_rx_rings; i++)
+ if (priv->rx_ring[i]->xdp.prog)
+ return num_tx_rings - num_possible_cpus();
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
- if (unlikely(skb_linearize(skb)))
- goto drop_packet_err;
+ return num_tx_rings;
+}
- count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+static struct enetc_bdr *enetc_rx_ring_from_xdp_tx_ring(struct enetc_ndev_priv *priv,
+ struct enetc_bdr *tx_ring)
+{
+ int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring;
- enetc_lock_mdio();
- count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
- enetc_unlock_mdio();
+ return priv->rx_ring[index];
+}
- if (unlikely(!count))
- goto drop_packet_err;
+static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
+ return NULL;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
- netif_stop_subqueue(ndev, tx_ring->index);
+ return tx_swbd->skb;
+}
- return NETDEV_TX_OK;
+static struct xdp_frame *
+enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
+{
+ if (tx_swbd->is_xdp_redirect)
+ return tx_swbd->xdp_frame;
-drop_packet_err:
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ return NULL;
}
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
+ /* For XDP_TX, pages come from RX, whereas for the other contexts where
+	 * is_dma_page is set, the pages come from skb_frag_dma_map. The unmap
+	 * length must match the mapping length, so we need to differentiate them.
+ */
if (tx_swbd->is_dma_page)
dma_unmap_page(tx_ring->dev, tx_swbd->dma,
- tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len,
+ tx_swbd->dir);
else
dma_unmap_single(tx_ring->dev, tx_swbd->dma,
- tx_swbd->len, DMA_TO_DEVICE);
+ tx_swbd->len, tx_swbd->dir);
tx_swbd->dma = 0;
}
-static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
- struct enetc_tx_swbd *tx_swbd)
+static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
{
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+
if (tx_swbd->dma)
enetc_unmap_tx_buff(tx_ring, tx_swbd);
- if (tx_swbd->skb) {
- dev_kfree_skb_any(tx_swbd->skb);
+ if (xdp_frame) {
+ xdp_return_frame(tx_swbd->xdp_frame);
+ tx_swbd->xdp_frame = NULL;
+ } else if (skb) {
+ dev_kfree_skb_any(skb);
tx_swbd->skb = NULL;
}
}
-static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- int active_offloads)
+/* Let H/W know BD ring has been updated */
+static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring)
+{
+ /* includes wmb() */
+ enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use);
+}
+
+static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
+ u8 *msgtype, u8 *twostep,
+ u16 *correction_offset, u16 *body_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 || type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
+
+static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_tx_swbd *tx_swbd;
- skb_frag_t *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
+ u8 msgtype, twostep, udp;
union enetc_tx_bd *txbd;
- bool do_vlan, do_tstamp;
+ u16 offset1, offset2;
int i, count = 0;
+ skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
@@ -104,15 +154,25 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
count++;
do_vlan = skb_vlan_tag_present(skb);
- do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
- tx_swbd->do_tstamp = do_tstamp;
- tx_swbd->check_wb = tx_swbd->do_tstamp;
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
+ &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep)
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ else
+ do_onestep_tstamp = true;
+ } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
- if (do_vlan || do_tstamp)
+ tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
+ tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
+
+ if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
if (tx_ring->tsd_enable)
@@ -149,7 +209,40 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
}
- if (do_tstamp) {
+ if (do_onestep_tstamp) {
+ u32 lo, hi, val;
+ u64 sec, nsec;
+ u8 *data;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Configure extension BD */
+ temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
+
+ /* Update originTimestamp field of Sync packet
+ * - 48 bits seconds field
+			 * - 32 bits nanoseconds field
+ */
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) =
+ htons((sec >> 32) & 0xffff);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(sec & 0xffffffff);
+ *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+
+ /* Configure single-step register */
+ val = ENETC_PM0_SINGLE_STEP_EN;
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
+ enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
+ } else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
}
@@ -186,6 +279,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
tx_swbd->dma = dma;
tx_swbd->len = len;
tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
count++;
}
@@ -194,6 +288,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
temp_bd.flags = flags;
*txbd = temp_bd;
+ tx_ring->tx_swbd[i].is_eof = true;
tx_ring->tx_swbd[i].skb = skb;
enetc_bdr_idx_inc(tx_ring, &i);
@@ -201,8 +296,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
skb_tx_timestamp(skb);
- /* let H/W know BD ring has been updated */
- enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
+ enetc_update_tx_ring_tail(tx_ring);
return count;
@@ -211,7 +305,7 @@ dma_err:
do {
tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_skb(tx_ring, tx_swbd);
+ enetc_free_tx_frame(tx_ring, tx_swbd);
if (i == 0)
i = tx_ring->bd_count;
i--;
@@ -220,6 +314,76 @@ dma_err:
return 0;
}
+static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int count;
+
+ /* Queue one-step Sync packet if already locked */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
+ &priv->flags)) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ tx_ring = priv->tx_ring[skb->queue_mapping];
+
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+
+ if (unlikely(!count))
+ goto drop_packet_err;
+
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ netif_stop_subqueue(ndev, tx_ring->index);
+
+ return NETDEV_TX_OK;
+
+drop_packet_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ u8 udp, msgtype, twostep;
+ u16 offset1, offset2;
+
+	/* Mark tx timestamp type on skb->cb[0] if required */
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
+ skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ } else {
+ skb->cb[0] = 0;
+ }
+
+ /* Fall back to two-step timestamp if not one-step Sync packet */
+ if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
+ &offset1, &offset2) ||
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
+ skb->cb[0] = ENETC_F_TX_TSTAMP;
+ }
+
+ return enetc_start_xmit(skb, ndev);
+}
+
static irqreturn_t enetc_msix(int irq, void *data)
{
struct enetc_int_vector *v = data;
@@ -241,10 +405,6 @@ static irqreturn_t enetc_msix(int irq, void *data)
return IRQ_HANDLED;
}
-static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
-static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
- struct napi_struct *napi, int work_limit);
-
static void enetc_rx_dim_work(struct work_struct *w)
{
struct dim *dim = container_of(w, struct dim, work);
@@ -273,55 +433,30 @@ static void enetc_rx_net_dim(struct enetc_int_vector *v)
net_dim(&v->rx_dim, dim_sample);
}
-static int enetc_poll(struct napi_struct *napi, int budget)
+static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
- struct enetc_int_vector
- *v = container_of(napi, struct enetc_int_vector, napi);
- bool complete = true;
- int work_done;
- int i;
-
- enetc_lock_mdio();
-
- for (i = 0; i < v->count_tx_rings; i++)
- if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
- complete = false;
-
- work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
- if (work_done == budget)
- complete = false;
- if (work_done)
- v->rx_napi_work = true;
-
- if (!complete) {
- enetc_unlock_mdio();
- return budget;
- }
-
- napi_complete_done(napi, work_done);
-
- if (likely(v->rx_dim_en))
- enetc_rx_net_dim(v);
-
- v->rx_napi_work = false;
-
- /* enable interrupts */
- enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
-
- for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
- enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
- ENETC_TBIER_TXTIE);
+ int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
- enetc_unlock_mdio();
+ return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+}
- return work_done;
+static bool enetc_page_reusable(struct page *page)
+{
+ return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}
-static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
+static void enetc_reuse_page(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *old)
{
- int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
+ struct enetc_rx_swbd *new;
- return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
+ new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
+
+ /* next buf that may reuse a page */
+ enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
+
+ /* copy page reference */
+ *new = *old;
}
static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
@@ -344,23 +479,58 @@ static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
- /* Ensure skb_mstamp_ns, which might have been populated with
- * the txtime, is not mistaken for a software timestamp,
- * because this will prevent the dispatch of our hardware
- * timestamp to the socket.
- */
- skb->tstamp = ktime_set(0, 0);
+ skb_txtime_consumed(skb);
skb_tstamp_tx(skb, &shhwtstamps);
}
}
+static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *tx_swbd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
+ struct enetc_rx_swbd rx_swbd = {
+ .dma = tx_swbd->dma,
+ .page = tx_swbd->page,
+ .page_offset = tx_swbd->page_offset,
+ .dir = tx_swbd->dir,
+ .len = tx_swbd->len,
+ };
+ struct enetc_bdr *rx_ring;
+
+ rx_ring = enetc_rx_ring_from_xdp_tx_ring(priv, tx_ring);
+
+ if (likely(enetc_swbd_unused(rx_ring))) {
+ enetc_reuse_page(rx_ring, &rx_swbd);
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
+ rx_swbd.page_offset,
+ ENETC_RXB_DMA_SIZE_XDP,
+ rx_swbd.dir);
+
+ rx_ring->stats.recycles++;
+ } else {
+ /* RX ring is already full, we need to unmap and free the
+ * page, since there's nothing useful we can do with it.
+ */
+ rx_ring->stats.recycle_failures++;
+
+ dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
+ rx_swbd.dir);
+ __free_page(rx_swbd.page);
+ }
+
+ rx_ring->xdp.xdp_tx_in_flight--;
+}
+
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
struct net_device *ndev = tx_ring->ndev;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
- bool do_tstamp;
+ bool do_twostep_tstamp;
u64 tstamp = 0;
i = tx_ring->next_to_clean;
@@ -368,10 +538,12 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
- do_tstamp = false;
+ do_twostep_tstamp = false;
while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
- bool is_eof = !!tx_swbd->skb;
+ struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
+ struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
+ bool is_eof = tx_swbd->is_eof;
if (unlikely(tx_swbd->check_wb)) {
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -380,26 +552,40 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
txbd = ENETC_TXBD(*tx_ring, i);
if (txbd->flags & ENETC_TXBD_FLAGS_W &&
- tx_swbd->do_tstamp) {
+ tx_swbd->do_twostep_tstamp) {
enetc_get_tx_tstamp(&priv->si->hw, txbd,
&tstamp);
- do_tstamp = true;
+ do_twostep_tstamp = true;
}
}
- if (likely(tx_swbd->dma))
+ if (tx_swbd->is_xdp_tx)
+ enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd);
+ else if (likely(tx_swbd->dma))
enetc_unmap_tx_buff(tx_ring, tx_swbd);
- if (is_eof) {
- if (unlikely(do_tstamp)) {
- enetc_tstamp_tx(tx_swbd->skb, tstamp);
- do_tstamp = false;
+ if (xdp_frame) {
+ xdp_return_frame(xdp_frame);
+ } else if (skb) {
+ if (unlikely(tx_swbd->skb->cb[0] &
+ ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+				/* Start work to release the lock for the next
+				 * one-step timestamping packet. Also send one
+				 * skb from the tx_skbs queue if there is one.
+ */
+ schedule_work(&priv->tx_onestep_tstamp);
+ } else if (unlikely(do_twostep_tstamp)) {
+ enetc_tstamp_tx(skb, tstamp);
+ do_twostep_tstamp = false;
}
- napi_consume_skb(tx_swbd->skb, napi_budget);
- tx_swbd->skb = NULL;
+ napi_consume_skb(skb, napi_budget);
}
tx_byte_cnt += tx_swbd->len;
+ /* Scrub the swbd here so we don't have to do that
+ * when we reuse it during xmit
+ */
+ memset(tx_swbd, 0, sizeof(*tx_swbd));
bds_to_clean--;
tx_swbd++;
@@ -437,6 +623,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
static bool enetc_new_page(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
+ bool xdp = !!(rx_ring->xdp.prog);
struct page *page;
dma_addr_t addr;
@@ -444,7 +631,10 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring,
if (unlikely(!page))
return false;
- addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ /* For XDP_TX, we forgo dma_unmap -> dma_map */
+ rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+
+ addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
__free_page(page);
@@ -453,7 +643,7 @@ static bool enetc_new_page(struct enetc_bdr *rx_ring,
rx_swbd->dma = addr;
rx_swbd->page = page;
- rx_swbd->page_offset = ENETC_RXB_PAD;
+ rx_swbd->page_offset = rx_ring->buffer_offset;
return true;
}
@@ -483,18 +673,16 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
 		/* clear 'R' as well */
rxbd->r.lstatus = 0;
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- rx_swbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
- i = 0;
- rx_swbd = rx_ring->rx_swbd;
- }
+ enetc_rxbd_next(rx_ring, &rxbd, &i);
+ rx_swbd = &rx_ring->rx_swbd[i];
}
if (likely(j)) {
rx_ring->next_to_alloc = i; /* keep track from page reuse */
rx_ring->next_to_use = i;
+
+ /* update ENETC's consumer index */
+ enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
}
return j;
@@ -570,32 +758,10 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
#endif
}
-static void enetc_process_skb(struct enetc_bdr *rx_ring,
- struct sk_buff *skb)
-{
- skb_record_rx_queue(skb, rx_ring->index);
- skb->protocol = eth_type_trans(skb, rx_ring->ndev);
-}
-
-static bool enetc_page_reusable(struct page *page)
-{
- return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
-}
-
-static void enetc_reuse_page(struct enetc_bdr *rx_ring,
- struct enetc_rx_swbd *old)
-{
- struct enetc_rx_swbd *new;
-
- new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
-
- /* next buf that may reuse a page */
- enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
-
- /* copy page reference */
- *new = *old;
-}
-
+/* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
+ * so it needs to work with both DMA_FROM_DEVICE as well as DMA_BIDIRECTIONAL
+ * mapped buffers.
+ */
static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
int i, u16 size)
{
@@ -603,30 +769,39 @@ static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
rx_swbd->page_offset,
- size, DMA_FROM_DEVICE);
+ size, rx_swbd->dir);
return rx_swbd;
}
+/* Reuse the current page without performing half-page buffer flipping */
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
struct enetc_rx_swbd *rx_swbd)
{
+ size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;
+
+ enetc_reuse_page(rx_ring, rx_swbd);
+
+ dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
+ rx_swbd->page_offset,
+ buffer_size, rx_swbd->dir);
+
+ rx_swbd->page = NULL;
+}
+
+/* Reuse the current page by performing half-page buffer flipping */
+static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
+ struct enetc_rx_swbd *rx_swbd)
+{
if (likely(enetc_page_reusable(rx_swbd->page))) {
rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
page_ref_inc(rx_swbd->page);
- enetc_reuse_page(rx_ring, rx_swbd);
-
- /* sync for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
- rx_swbd->page_offset,
- ENETC_RXB_DMA_SIZE,
- DMA_FROM_DEVICE);
+ enetc_put_rx_buff(rx_ring, rx_swbd);
} else {
- dma_unmap_page(rx_ring->dev, rx_swbd->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ rx_swbd->page = NULL;
}
-
- rx_swbd->page = NULL;
}
static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
@@ -637,16 +812,16 @@ static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
void *ba;
ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
- skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
+ skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE);
if (unlikely(!skb)) {
rx_ring->stats.rx_alloc_errs++;
return NULL;
}
- skb_reserve(skb, ENETC_RXB_PAD);
+ skb_reserve(skb, rx_ring->buffer_offset);
__skb_put(skb, size);
- enetc_put_rx_buff(rx_ring, rx_swbd);
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
return skb;
}
@@ -659,7 +834,72 @@ static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
- enetc_put_rx_buff(rx_ring, rx_swbd);
+ enetc_flip_rx_buff(rx_ring, rx_swbd);
+}
+
+static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring,
+ u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i)
+{
+ if (likely(!(bd_status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))))
+ return false;
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ dma_rmb();
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+
+ enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]);
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ rx_ring->ndev->stats.rx_dropped++;
+ rx_ring->ndev->stats.rx_errors++;
+
+ return true;
+}
+
+static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring,
+ u32 bd_status, union enetc_rx_bd **rxbd,
+ int *i, int *cleaned_cnt, int buffer_size)
+{
+ struct sk_buff *skb;
+ u16 size;
+
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size);
+ if (!skb)
+ return NULL;
+
+ enetc_get_offloads(rx_ring, *rxbd, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = buffer_size;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
+ }
+
+ enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb);
+
+ (*cleaned_cnt)++;
+
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+
+ skb_record_rx_queue(skb, rx_ring->index);
+ skb->protocol = eth_type_trans(skb, rx_ring->ndev);
+
+ return skb;
}
#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
@@ -678,15 +918,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
u32 bd_status;
- u16 size;
- if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
- int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
-
- /* update ENETC's consumer index */
- enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
- cleaned_cnt -= count;
- }
+ if (cleaned_cnt >= ENETC_RXBD_BUNDLE)
+ cleaned_cnt -= enetc_refill_rx_ring(rx_ring,
+ cleaned_cnt);
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -695,73 +930,511 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
dma_rmb(); /* for reading other rxbd fields */
- size = le16_to_cpu(rxbd->r.buf_len);
- skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, ENETC_RXB_DMA_SIZE);
if (!skb)
break;
- enetc_get_offloads(rx_ring, rxbd, skb);
+ rx_byte_cnt += skb->len;
+ rx_frm_cnt++;
- cleaned_cnt++;
+ napi_gro_receive(napi, skb);
+ }
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
+ rx_ring->next_to_clean = i;
- if (unlikely(bd_status &
- ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
- dev_kfree_skb(skb);
- while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
- dma_rmb();
- bd_status = le32_to_cpu(rxbd->r.lstatus);
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
- }
+ return rx_frm_cnt;
+}
+
+static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i,
+ struct enetc_tx_swbd *tx_swbd,
+ int frm_len)
+{
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(txbd);
+ txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset);
+ txbd->buf_len = cpu_to_le16(tx_swbd->len);
+ txbd->frm_len = cpu_to_le16(frm_len);
+
+ memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd));
+}
+
+/* Puts in the TX ring one XDP frame, mapped as an array of TX software buffer
+ * descriptors.
+ */
+static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd)
+{
+ struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr;
+ int i, k, frm_len = tmp_tx_swbd->len;
+
+ if (unlikely(enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(num_tx_swbd)))
+ return false;
+
+ while (unlikely(!tmp_tx_swbd->is_eof)) {
+ tmp_tx_swbd++;
+ frm_len += tmp_tx_swbd->len;
+ }
+
+ i = tx_ring->next_to_use;
+
+ for (k = 0; k < num_tx_swbd; k++) {
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k];
+
+ enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len);
+
+ /* last BD needs 'F' bit set */
+ if (xdp_tx_swbd->is_eof) {
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
+
+ txbd->flags = ENETC_TXBD_FLAGS_F;
+ }
+
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+
+ return true;
+}
+
+static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
+ struct enetc_tx_swbd *xdp_tx_arr,
+ struct xdp_frame *xdp_frame)
+{
+ struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
+ struct skb_shared_info *shinfo;
+ void *data = xdp_frame->data;
+ int len = xdp_frame->len;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ unsigned int f;
+ int n = 0;
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+
+ shinfo = xdp_get_shared_info_from_frame(xdp_frame);
+
+ for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
+ f++, frag++) {
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
+ /* Undo the DMA mapping for all fragments */
+ while (--n >= 0)
+ enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
+
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -1;
+ }
+
+ xdp_tx_swbd->dma = dma;
+ xdp_tx_swbd->dir = DMA_TO_DEVICE;
+ xdp_tx_swbd->len = len;
+ xdp_tx_swbd->is_xdp_redirect = true;
+ xdp_tx_swbd->is_eof = false;
+ xdp_tx_swbd->xdp_frame = NULL;
+
+ n++;
+ xdp_tx_swbd = &xdp_tx_arr[n];
+ }
+
+ xdp_tx_arr[n - 1].is_eof = true;
+ xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
+
+ return n;
+}
+
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_bdr *tx_ring;
+ int xdp_tx_bd_cnt, i, k;
+ int xdp_tx_frm_cnt = 0;
+
+ enetc_lock_mdio();
+
+ tx_ring = priv->xdp_tx_ring[smp_processor_id()];
- rx_ring->ndev->stats.rx_dropped++;
- rx_ring->ndev->stats.rx_errors++;
+ prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use));
+ for (k = 0; k < num_frames; k++) {
+ xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
+ xdp_redirect_arr,
+ frames[k]);
+ if (unlikely(xdp_tx_bd_cnt < 0))
+ break;
+
+ if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr,
+ xdp_tx_bd_cnt))) {
+ for (i = 0; i < xdp_tx_bd_cnt; i++)
+ enetc_unmap_tx_buff(tx_ring,
+ &xdp_redirect_arr[i]);
+ tx_ring->stats.xdp_tx_drops++;
break;
}
- /* not last BD in frame? */
- while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
- bd_status = le32_to_cpu(rxbd->r.lstatus);
- size = ENETC_RXB_DMA_SIZE;
+ xdp_tx_frm_cnt++;
+ }
- if (bd_status & ENETC_RXBD_LSTATUS_F) {
- dma_rmb();
- size = le16_to_cpu(rxbd->r.buf_len);
- }
+ if (unlikely((flags & XDP_XMIT_FLUSH) || k != xdp_tx_frm_cnt))
+ enetc_update_tx_ring_tail(tx_ring);
+
+ tx_ring->stats.xdp_tx += xdp_tx_frm_cnt;
+
+ enetc_unlock_mdio();
+
+ return xdp_tx_frm_cnt;
+}
+
+static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ struct xdp_buff *xdp_buff, u16 size)
+{
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
+ struct skb_shared_info *shinfo;
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
- enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
+ xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
+ rx_ring->buffer_offset, size, false);
- cleaned_cnt++;
+ shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ shinfo->nr_frags = 0;
+}
+
+static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
+ u16 size, struct xdp_buff *xdp_buff)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
+ struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
+ skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+
+ /* To be used for XDP_TX */
+ rx_swbd->len = size;
+
+ skb_frag_off_set(frag, rx_swbd->page_offset);
+ skb_frag_size_set(frag, size);
+ __skb_frag_set_page(frag, rx_swbd->page);
+
+ shinfo->nr_frags++;
+}
+
+static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status,
+ union enetc_rx_bd **rxbd, int *i,
+ int *cleaned_cnt, struct xdp_buff *xdp_buff)
+{
+ u16 size = le16_to_cpu((*rxbd)->r.buf_len);
+
+ xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
+
+ enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
- rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
- if (unlikely(++i == rx_ring->bd_count))
- i = 0;
+ /* not last BD in frame? */
+ while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
+ bd_status = le32_to_cpu((*rxbd)->r.lstatus);
+ size = ENETC_RXB_DMA_SIZE_XDP;
+
+ if (bd_status & ENETC_RXBD_LSTATUS_F) {
+ dma_rmb();
+ size = le16_to_cpu((*rxbd)->r.buf_len);
}
- rx_byte_cnt += skb->len;
+ enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff);
+ (*cleaned_cnt)++;
+ enetc_rxbd_next(rx_ring, rxbd, i);
+ }
+}
- enetc_process_skb(rx_ring, skb);
+/* Convert RX buffer descriptors to TX buffer descriptors. These will be
+ * recycled back into the RX ring in enetc_clean_tx_ring.
+ */
+static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr,
+ struct enetc_bdr *rx_ring,
+ int rx_ring_first, int rx_ring_last)
+{
+ int n = 0;
+
+ for (; rx_ring_first != rx_ring_last;
+ n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+ struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n];
+
+ /* No need to dma_map, we already have DMA_BIDIRECTIONAL */
+ tx_swbd->dma = rx_swbd->dma;
+ tx_swbd->dir = rx_swbd->dir;
+ tx_swbd->page = rx_swbd->page;
+ tx_swbd->page_offset = rx_swbd->page_offset;
+ tx_swbd->len = rx_swbd->len;
+ tx_swbd->is_dma_page = true;
+ tx_swbd->is_xdp_tx = true;
+ tx_swbd->is_eof = false;
+ }
- napi_gro_receive(napi, skb);
+ /* We rely on caller providing an rx_ring_last > rx_ring_first */
+ xdp_tx_arr[n - 1].is_eof = true;
+
+ return n;
+}
+
+static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_put_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+ rx_ring->stats.xdp_drops++;
+}
+
+static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
+
+ if (rx_swbd->page) {
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
+ __free_page(rx_swbd->page);
+ rx_swbd->page = NULL;
+ }
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+ rx_ring->stats.xdp_redirect_failures++;
+}
+
+static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ struct napi_struct *napi, int work_limit,
+ struct bpf_prog *prog)
+{
+ int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0;
+ struct enetc_tx_swbd xdp_tx_arr[ENETC_MAX_SKB_FRAGS] = {0};
+ struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
+ int rx_frm_cnt = 0, rx_byte_cnt = 0;
+ struct enetc_bdr *tx_ring;
+ int cleaned_cnt, i;
+ u32 xdp_act;
+
+ cleaned_cnt = enetc_bd_unused(rx_ring);
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd, *orig_rxbd;
+ int orig_i, orig_cleaned_cnt;
+ struct xdp_buff xdp_buff;
+ struct sk_buff *skb;
+ int tmp_orig_i, err;
+ u32 bd_status;
+
+ rxbd = enetc_rxbd(rx_ring, i);
+ bd_status = le32_to_cpu(rxbd->r.lstatus);
+ if (!bd_status)
+ break;
+
+ enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
+ dma_rmb(); /* for reading other rxbd fields */
+
+ if (enetc_check_bd_errors_and_consume(rx_ring, bd_status,
+ &rxbd, &i))
+ break;
+
+ orig_rxbd = rxbd;
+ orig_cleaned_cnt = cleaned_cnt;
+ orig_i = i;
+
+ enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
+ &cleaned_cnt, &xdp_buff);
+
+ xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
+
+ switch (xdp_act) {
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ break;
+ case XDP_PASS:
+ rxbd = orig_rxbd;
+ cleaned_cnt = orig_cleaned_cnt;
+ i = orig_i;
+
+ skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
+ &i, &cleaned_cnt,
+ ENETC_RXB_DMA_SIZE_XDP);
+ if (unlikely(!skb))
+ goto out;
+
+ napi_gro_receive(napi, skb);
+ break;
+ case XDP_TX:
+ tx_ring = priv->xdp_tx_ring[rx_ring->index];
+ xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr,
+ rx_ring,
+ orig_i, i);
+
+ if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ tx_ring->stats.xdp_tx_drops++;
+ } else {
+ tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
+ xdp_tx_frm_cnt++;
+ /* The XDP_TX enqueue was successful, so we
+ * need to scrub the RX software BDs because
+ * the ownership of the buffers no longer
+ * belongs to the RX ring, and we must prevent
+ * enetc_refill_rx_ring() from reusing
+ * rx_swbd->page.
+ */
+ while (orig_i != i) {
+ rx_ring->rx_swbd[orig_i].page = NULL;
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+ }
+ break;
+ case XDP_REDIRECT:
+ /* xdp_return_frame does not support S/G in the sense
+ * that it leaks the fragments (__xdp_return should not
+ * call page_frag_free only for the initial buffer).
+ * Until XDP_REDIRECT gains support for S/G let's keep
+ * the code structure in place, but dead. We drop the
+ * S/G frames ourselves to avoid memory leaks which
+ * would otherwise leave the kernel OOM.
+ */
+ if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_sg++;
+ break;
+ }
+
+ tmp_orig_i = orig_i;
+
+ while (orig_i != i) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[orig_i]);
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
+
+ err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ if (unlikely(err)) {
+ enetc_xdp_free(rx_ring, tmp_orig_i, i);
+ } else {
+ xdp_redirect_frm_cnt++;
+ rx_ring->stats.xdp_redirect++;
+ }
+ }
rx_frm_cnt++;
}
+out:
rx_ring->next_to_clean = i;
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ if (xdp_redirect_frm_cnt)
+ xdp_do_flush_map();
+
+ if (xdp_tx_frm_cnt)
+ enetc_update_tx_ring_tail(tx_ring);
+
+ if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight)
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
+ rx_ring->xdp.xdp_tx_in_flight);
+
return rx_frm_cnt;
}
+static int enetc_poll(struct napi_struct *napi, int budget)
+{
+ struct enetc_int_vector
+ *v = container_of(napi, struct enetc_int_vector, napi);
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ struct bpf_prog *prog;
+ bool complete = true;
+ int work_done;
+ int i;
+
+ enetc_lock_mdio();
+
+ for (i = 0; i < v->count_tx_rings; i++)
+ if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ complete = false;
+
+ prog = rx_ring->xdp.prog;
+ if (prog)
+ work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog);
+ else
+ work_done = enetc_clean_rx_ring(rx_ring, napi, budget);
+ if (work_done == budget)
+ complete = false;
+ if (work_done)
+ v->rx_napi_work = true;
+
+ if (!complete) {
+ enetc_unlock_mdio();
+ return budget;
+ }
+
+ napi_complete_done(napi, work_done);
+
+ if (likely(v->rx_dim_en))
+ enetc_rx_net_dim(v);
+
+ v->rx_napi_work = false;
+
+ /* enable interrupts */
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
+
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
+ enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
+ ENETC_TBIER_TXTIE);
+
+ enetc_unlock_mdio();
+
+ return work_done;
+}
+
/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
@@ -836,7 +1509,7 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
int size, i;
for (i = 0; i < txr->bd_count; i++)
- enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
+ enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
size = txr->bd_count * sizeof(union enetc_tx_bd);
@@ -953,7 +1626,7 @@ static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_skb(tx_ring, tx_swbd);
+ enetc_free_tx_frame(tx_ring, tx_swbd);
}
tx_ring->next_to_clean = 0;
@@ -973,8 +1646,8 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
if (!rx_swbd->page)
continue;
- dma_unmap_page(rx_ring->dev, rx_swbd->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
+ rx_swbd->dir);
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
@@ -995,60 +1668,6 @@ static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
enetc_free_tx_ring(priv->tx_ring[i]);
}
-int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
-{
- int size = cbdr->bd_count * sizeof(struct enetc_cbd);
-
- cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
- GFP_KERNEL);
- if (!cbdr->bd_base)
- return -ENOMEM;
-
- /* h/w requires 128B alignment */
- if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
- dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
- return -EINVAL;
- }
-
- cbdr->next_to_clean = 0;
- cbdr->next_to_use = 0;
-
- return 0;
-}
-
-void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
-{
- int size = cbdr->bd_count * sizeof(struct enetc_cbd);
-
- dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
- cbdr->bd_base = NULL;
-}
-
-void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
-{
- /* set CBDR cache attributes */
- enetc_wr(hw, ENETC_SICAR2,
- ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
-
- enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
- enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
- enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
-
- enetc_wr(hw, ENETC_SICBDRPIR, 0);
- enetc_wr(hw, ENETC_SICBDRCIR, 0);
-
- /* enable ring */
- enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
-
- cbdr->pir = hw->reg + ENETC_SICBDRPIR;
- cbdr->cir = hw->reg + ENETC_SICBDRCIR;
-}
-
-void enetc_clear_cbdr(struct enetc_hw *hw)
-{
- enetc_wr(hw, ENETC_SICBDRMR, 0);
-}
-
static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
int *rss_table;
@@ -1108,45 +1727,22 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
priv->bdr_int_num = cpus;
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
priv->tx_ictt = ENETC_TXIC_TIMETHR;
-
- /* SI specific */
- si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
- int err;
-
- err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
- if (err)
- return err;
-
- enetc_setup_cbdr(&si->hw, &si->cbd_ring);
priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
GFP_KERNEL);
- if (!priv->cls_rules) {
- err = -ENOMEM;
- goto err_alloc_cls;
- }
+ if (!priv->cls_rules)
+ return -ENOMEM;
return 0;
-
-err_alloc_cls:
- enetc_clear_cbdr(&si->hw);
- enetc_free_cbdr(priv->dev, &si->cbd_ring);
-
- return err;
}
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
- struct enetc_si *si = priv->si;
-
- enetc_clear_cbdr(&si->hw);
- enetc_free_cbdr(priv->dev, &si->cbd_ring);
-
kfree(priv->cls_rules);
}
@@ -1199,7 +1795,10 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
ENETC_RTBLENR_LEN(rx_ring->bd_count));
- enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ if (rx_ring->xdp.prog)
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE_XDP);
+ else
+ enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
@@ -1217,9 +1816,9 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
- /* update ENETC's consumer index */
- enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);
+ enetc_unlock_mdio();
/* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
@@ -1408,6 +2007,29 @@ static int enetc_phylink_connect(struct net_device *ndev)
return 0;
}
+static void enetc_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct enetc_ndev_priv *priv;
+ struct sk_buff *skb;
+
+ priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
+
+ netif_tx_lock(priv->ndev);
+
+ clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (skb)
+ enetc_start_xmit(skb, priv->ndev);
+
+ netif_tx_unlock(priv->ndev);
+}
+
+static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+{
+ INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp);
+ skb_queue_head_init(&priv->tx_skbs);
+}
+
void enetc_start(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1434,6 +2056,7 @@ void enetc_start(struct net_device *ndev)
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int num_stack_tx_queues;
int err;
err = enetc_setup_irqs(priv);
@@ -1452,7 +2075,9 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_alloc_rx;
- err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
if (err)
goto err_set_queues;
@@ -1460,6 +2085,7 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_set_queues;
+ enetc_tx_onestep_tstamp_init(priv);
enetc_setup_bdrs(priv);
enetc_start(ndev);
@@ -1524,15 +2150,17 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
struct enetc_bdr *tx_ring;
+ int num_stack_tx_queues;
u8 num_tc;
int i;
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
num_tc = mqprio->num_tc;
if (!num_tc) {
netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -1544,7 +2172,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
}
/* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > priv->num_tx_rings) {
+ if (num_tc > num_stack_tx_queues) {
netdev_err(ndev, "Max %d traffic classes supported\n",
priv->num_tx_rings);
return -EINVAL;
@@ -1590,6 +2218,54 @@ int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ bool is_up;
+ int i;
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ is_up = netif_running(dev);
+ if (is_up)
+ dev_close(dev);
+
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+
+ rx_ring->xdp.prog = prog;
+
+ if (prog)
+ rx_ring->buffer_offset = XDP_PACKET_HEADROOM;
+ else
+ rx_ring->buffer_offset = ENETC_RXB_PAD;
+ }
+
+ if (is_up)
+ return dev_open(dev, extack);
+
+ return 0;
+}
+
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -1710,11 +2386,16 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
case HWTSTAMP_TX_ON:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
priv->active_offloads |= ENETC_F_TX_TSTAMP;
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ break;
default:
return -ERANGE;
}
@@ -1745,7 +2426,9 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
config.flags = 0;
- if (priv->active_offloads & ENETC_F_TX_TSTAMP)
+ if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
+ config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
@@ -1777,8 +2460,9 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
- int v_tx_rings;
+ int first_xdp_tx_ring;
int i, n, err, nvec;
+ int v_tx_rings;
nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
/* allocate MSIX for both messaging and Rx/Tx interrupts */
@@ -1806,6 +2490,28 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
priv->int_vector[i] = v;
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ if (err) {
+ kfree(v);
+ goto fail;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ kfree(v);
+ goto fail;
+ }
+
/* init defaults for adaptive IC */
if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
v->rx_ictt = 0x1;
@@ -1820,11 +2526,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
int idx;
/* default tx ring mapping policy */
- if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
- idx = 2 * j + i; /* 2 CPUs */
- else
- idx = j + i * v_tx_rings; /* default */
-
+ idx = priv->bdr_int_num * j + i;
__set_bit(idx, &v->tx_rings_map);
bdr = &v->tx_ring[j];
bdr->index = idx;
@@ -1833,22 +2535,23 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
bdr->bd_count = priv->tx_bd_count;
priv->tx_ring[idx] = bdr;
}
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- priv->rx_ring[i] = bdr;
}
+ first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
+ priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
+
return 0;
fail:
while (i--) {
- netif_napi_del(&priv->int_vector[i]->napi);
- cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
- kfree(priv->int_vector[i]);
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+ kfree(v);
}
pci_free_irq_vectors(pdev);
@@ -1862,7 +2565,10 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
for (i = 0; i < priv->bdr_int_num; i++) {
struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
netif_napi_del(&v->napi);
cancel_work_sync(&v->rx_dim.work);
}
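
One consequence of the new TX ring mapping above is worth spelling out: when an
XDP program is attached, the last num_possible_cpus() TX rings are no longer
exposed to the stack (enetc_num_stack_tx_queues() subtracts them) and are used
as per-CPU XDP TX rings, with priv->xdp_tx_ring pointing at the first of them.
A small sketch of that invariant follows; it is illustrative only and not part
of the patch.

#include <linux/cpumask.h>
#include "enetc.h"

/* Illustrative helper, assuming the layout set up in enetc_alloc_msix():
 * xdp_tx_ring[cpu] aliases tx_ring[num_tx_rings - nr_cpus + cpu].
 */
static bool example_xdp_ring_layout_ok(struct enetc_ndev_priv *priv)
{
	int nr_cpus = num_possible_cpus();
	int first_xdp = priv->num_tx_rings - nr_cpus;
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (priv->xdp_tx_ring[cpu] != priv->tx_ring[first_xdp + cpu])
			return false;

	return true;
}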
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 8b380fc13314..08b283347d9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -19,12 +19,21 @@
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
struct enetc_tx_swbd {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdp_frame;
+ };
dma_addr_t dma;
+ struct page *page; /* valid only if is_xdp_tx */
+ u16 page_offset; /* valid only if is_xdp_tx */
u16 len;
+ enum dma_data_direction dir;
u8 is_dma_page:1;
u8 check_wb:1;
- u8 do_tstamp:1;
+ u8 do_twostep_tstamp:1;
+ u8 is_eof:1;
+ u8 is_xdp_tx:1;
+ u8 is_xdp_redirect:1;
};
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
@@ -32,21 +41,45 @@ struct enetc_tx_swbd {
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+#define ENETC_RXB_DMA_SIZE_XDP \
+ (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
struct enetc_rx_swbd {
dma_addr_t dma;
struct page *page;
u16 page_offset;
+ enum dma_data_direction dir;
+ u16 len;
};
+/* ENETC overhead: optional extension BD + 1 BD gap */
+#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
+/* max # of chained Tx BDs is 15, including head and extension BD */
+#define ENETC_MAX_SKB_FRAGS 13
+#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+
struct enetc_ring_stats {
unsigned int packets;
unsigned int bytes;
unsigned int rx_alloc_errs;
+ unsigned int xdp_drops;
+ unsigned int xdp_tx;
+ unsigned int xdp_tx_drops;
+ unsigned int xdp_redirect;
+ unsigned int xdp_redirect_failures;
+ unsigned int xdp_redirect_sg;
+ unsigned int recycles;
+ unsigned int recycle_failures;
+};
+
+struct enetc_xdp_data {
+ struct xdp_rxq_info rxq;
+ struct bpf_prog *prog;
+ int xdp_tx_in_flight;
};
-#define ENETC_RX_RING_DEFAULT_SIZE 512
-#define ENETC_TX_RING_DEFAULT_SIZE 256
+#define ENETC_RX_RING_DEFAULT_SIZE 2048
+#define ENETC_TX_RING_DEFAULT_SIZE 2048
#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)
struct enetc_bdr {
@@ -71,6 +104,9 @@ struct enetc_bdr {
};
void __iomem *idr; /* Interrupt Detect Register pointer */
+ int buffer_offset;
+ struct enetc_xdp_data xdp;
+
struct enetc_ring_stats stats;
dma_addr_t bd_dma_base;
@@ -92,18 +128,28 @@ static inline int enetc_bd_unused(struct enetc_bdr *bdr)
return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
}
+static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
+{
+ if (bdr->next_to_clean > bdr->next_to_alloc)
+ return bdr->next_to_clean - bdr->next_to_alloc - 1;
+
+ return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
+}
+
/* Control BD ring */
#define ENETC_CBDR_DEFAULT_SIZE 64
struct enetc_cbdr {
void *bd_base; /* points to Rx or Tx BD ring */
void __iomem *pir;
void __iomem *cir;
+ void __iomem *mr; /* mode register */
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
dma_addr_t bd_dma_base;
+ struct device *dma_dev;
};
#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
@@ -119,19 +165,26 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}
-static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring,
- union enetc_rx_bd *rxbd,
- int i)
+static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd **old_rxbd, int *old_index)
{
- rxbd++;
+ union enetc_rx_bd *new_rxbd = *old_rxbd;
+ int new_index = *old_index;
+
+ new_rxbd++;
+
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (rx_ring->ext_en)
- rxbd++;
+ new_rxbd++;
#endif
- if (unlikely(++i == rx_ring->bd_count))
- rxbd = rx_ring->bd_base;
- return rxbd;
+ if (unlikely(++new_index == rx_ring->bd_count)) {
+ new_rxbd = rx_ring->bd_base;
+ new_index = 0;
+ }
+
+ *old_rxbd = new_rxbd;
+ *old_index = new_index;
}
static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
@@ -184,6 +237,22 @@ static inline bool enetc_si_is_pf(struct enetc_si *si)
return !!(si->hw.port);
}
+static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
+{
+ switch (pf_pdev->devfn) {
+ case 0:
+ return 0;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ case 6:
+ return 3;
+ default:
+ return -1;
+ }
+}
+
#define ENETC_MAX_NUM_TXQS 8
#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8)
@@ -218,12 +287,20 @@ struct psfp_cap {
u32 max_psfp_meter;
};
+#define ENETC_F_TX_TSTAMP_MASK 0xff
/* TODO: more hardware offloads */
enum enetc_active_offloads {
- ENETC_F_RX_TSTAMP = BIT(0),
- ENETC_F_TX_TSTAMP = BIT(1),
- ENETC_F_QBV = BIT(2),
- ENETC_F_QCI = BIT(3),
+ /* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
+ ENETC_F_TX_TSTAMP = BIT(0),
+ ENETC_F_TX_ONESTEP_SYNC_TSTAMP = BIT(1),
+
+ ENETC_F_RX_TSTAMP = BIT(8),
+ ENETC_F_QBV = BIT(9),
+ ENETC_F_QCI = BIT(10),
+};
+
+enum enetc_flags_bit {
+ ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
};
/* interrupt coalescing modes */
@@ -252,10 +329,11 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
- int active_offloads;
+ enum enetc_active_offloads active_offloads;
u32 speed; /* store speed for compare update pspeed */
+ struct enetc_bdr **xdp_tx_ring;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
@@ -266,6 +344,13 @@ struct enetc_ndev_priv {
struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
+
+ struct bpf_prog *xdp_prog;
+
+ unsigned long flags;
+
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
};
/* Messaging */
@@ -305,15 +390,17 @@ int enetc_set_features(struct net_device *ndev,
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
+int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
/* control buffer descriptor ring (CBDR) */
-int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
-void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr);
-void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr);
-void enetc_clear_cbdr(struct enetc_hw *hw);
+int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
+ struct enetc_cbdr *cbdr);
+void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
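
enetc_rxbd_next() now advances both the caller's descriptor pointer and its ring index through output parameters, instead of returning a new pointer and leaving the index bookkeeping to the caller. A minimal caller-side sketch of the intended usage; the loop shape and variable names are chosen for illustration rather than copied from the driver:

/* Illustrative RX-walk shape after the enetc_rxbd_next() signature change. */
static void example_rx_walk(struct enetc_bdr *rx_ring)
{
	int i = rx_ring->next_to_clean;
	union enetc_rx_bd *rxbd = enetc_rxbd(rx_ring, i);

	while (0 /* descriptor at *rxbd is ready */) {
		/* ... consume the buffer described by rxbd ... */

		/* Advances rxbd (skipping the extension BD when the ring
		 * uses extended descriptors) and wraps i at bd_count.
		 */
		enetc_rxbd_next(rx_ring, &rxbd, &i);
	}

	rx_ring->next_to_clean = i;
}
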
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 201cbc362e33..073e56dcca4e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -3,9 +3,63 @@
#include "enetc.h"
-static void enetc_clean_cbdr(struct enetc_si *si)
+int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
+ struct enetc_cbdr *cbdr)
+{
+ int size = bd_count * sizeof(struct enetc_cbd);
+
+ cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
+ GFP_KERNEL);
+ if (!cbdr->bd_base)
+ return -ENOMEM;
+
+ /* h/w requires 128B alignment */
+ if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
+ dma_free_coherent(dev, size, cbdr->bd_base,
+ cbdr->bd_dma_base);
+ return -EINVAL;
+ }
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+ cbdr->dma_dev = dev;
+ cbdr->bd_count = bd_count;
+
+ cbdr->pir = hw->reg + ENETC_SICBDRPIR;
+ cbdr->cir = hw->reg + ENETC_SICBDRCIR;
+ cbdr->mr = hw->reg + ENETC_SICBDRMR;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
+ enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
+
+ enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
+ enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
+ /* enable ring */
+ enetc_wr_reg(cbdr->mr, BIT(31));
+
+ return 0;
+}
+
+void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
+{
+ int size = cbdr->bd_count * sizeof(struct enetc_cbd);
+
+ /* disable ring */
+ enetc_wr_reg(cbdr->mr, 0);
+
+ dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
+ cbdr->bd_dma_base);
+ cbdr->bd_base = NULL;
+ cbdr->dma_dev = NULL;
+}
+
+static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
- struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd *dest_cbd;
int i, status;
@@ -15,7 +69,7 @@ static void enetc_clean_cbdr(struct enetc_si *si)
dest_cbd = ENETC_CBD(*ring, i);
status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
if (status)
- dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
+ dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
status, dest_cbd->cmd);
memset(dest_cbd, 0, sizeof(*dest_cbd));
@@ -43,7 +97,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
return -EIO;
if (unlikely(!enetc_cbd_unused(ring)))
- enetc_clean_cbdr(si);
+ enetc_clean_cbdr(ring);
i = ring->next_to_use;
dest_cbd = ENETC_CBD(*ring, i);
@@ -69,7 +123,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
/* CBD may writeback data, feedback up level */
*cbd = *dest_cbd;
- enetc_clean_cbdr(si);
+ enetc_clean_cbdr(ring);
return 0;
}
@@ -117,6 +171,7 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
+ struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
@@ -129,10 +184,10 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
- tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
- dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
+ dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
return -ENOMEM;
}
@@ -145,9 +200,9 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
err = enetc_send_cmd(si, &cbd);
if (err)
- dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err);
+ dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
- dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
+ dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
tmp, dma);
return err;
@@ -157,6 +212,7 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
+ struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
@@ -166,10 +222,10 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* HW only takes in a full 64 entry table */
return -EINVAL;
- tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
+ tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
&dma, GFP_KERNEL);
if (!tmp) {
- dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
+ dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
return -ENOMEM;
}
dma_align = ALIGN(dma, RSSE_ALIGN);
@@ -189,13 +245,13 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
err = enetc_send_cmd(si, &cbd);
if (err)
- dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err);
+ dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);
if (read)
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
- dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);
+ dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
return err;
}
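
A minimal sketch of how the reworked control BD ring API above pairs up in a probe-style path; the surrounding function and error handling are simplified placeholders:

/* Sketch only: setup/teardown pairing of the new CBDR helpers. */
static int example_si_cbdr_init(struct enetc_si *si, struct device *dev)
{
	int err;

	/* Allocates the ring, verifies the 128B alignment requirement,
	 * programs SICBDRBAR0/1, SICBDRLENR, PIR/CIR and enables the ring
	 * via the mode register, all in one call.
	 */
	err = enetc_setup_cbdr(dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
			       &si->cbd_ring);
	if (err)
		return err;

	/* ... issue commands through enetc_send_cmd(si, ...) ... */

	/* Disables the ring and frees the DMA memory; the ring remembers
	 * its own dma_dev, so no device argument is needed here.
	 */
	enetc_teardown_cbdr(&si->cbd_ring);

	return 0;
}
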
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 89e558135432..ebccaf02411c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -192,10 +192,18 @@ static const struct {
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
"Rx ring %2d frames",
"Rx ring %2d alloc errors",
+ "Rx ring %2d XDP drops",
+ "Rx ring %2d recycles",
+ "Rx ring %2d recycle failures",
+ "Rx ring %2d redirects",
+ "Rx ring %2d redirect failures",
+ "Rx ring %2d redirect S/G",
};
static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
"Tx ring %2d frames",
+ "Tx ring %2d XDP frames",
+ "Tx ring %2d XDP drops",
};
static int enetc_get_sset_count(struct net_device *ndev, int sset)
@@ -267,12 +275,21 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++)
data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg);
- for (i = 0; i < priv->num_tx_rings; i++)
+ for (i = 0; i < priv->num_tx_rings; i++) {
data[o++] = priv->tx_ring[i]->stats.packets;
+ data[o++] = priv->tx_ring[i]->stats.xdp_tx;
+ data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
+ }
for (i = 0; i < priv->num_rx_rings; i++) {
data[o++] = priv->rx_ring[i]->stats.packets;
data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs;
+ data[o++] = priv->rx_ring[i]->stats.xdp_drops;
+ data[o++] = priv->rx_ring[i]->stats.recycles;
+ data[o++] = priv->rx_ring[i]->stats.recycle_failures;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
+ data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
}
if (!enetc_si_is_pf(priv->si))
@@ -654,7 +671,8 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
#else
@@ -690,6 +708,22 @@ static int enetc_set_wol(struct net_device *dev,
return ret;
}
+static void enetc_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
+}
+
+static int enetc_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
static int enetc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -736,6 +770,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_ts_info = enetc_get_ts_info,
.get_wol = enetc_get_wol,
.set_wol = enetc_set_wol,
+ .get_pauseparam = enetc_get_pauseparam,
+ .set_pauseparam = enetc_set_pauseparam,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 00938f7960a4..0f5f081a5baf 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -109,6 +109,7 @@ enum enetc_bdr_type {TX, RX};
/* RX BDR reg offsets */
#define ENETC_RBMR 0
#define ENETC_RBMR_BDS BIT(2)
+#define ENETC_RBMR_CM BIT(4)
#define ENETC_RBMR_VTE BIT(5)
#define ENETC_RBMR_EN BIT(31)
#define ENETC_RBSR 0x4
@@ -180,6 +181,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */
#define ENETC_PSIVLAN_EN BIT(31)
#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12)
+#define ENETC_PPAUONTR 0x0410
+#define ENETC_PPAUOFFTR 0x0414
#define ENETC_PTXMBAR 0x0608
#define ENETC_PCAPR0 0x0900
#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24)
@@ -227,6 +230,7 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_TX_EN BIT(0)
#define ENETC_PM0_RX_EN BIT(1)
#define ENETC_PM0_PROMISC BIT(4)
+#define ENETC_PM0_PAUSE_IGN BIT(8)
#define ENETC_PM0_CMD_XGLP BIT(10)
#define ENETC_PM0_CMD_TXP BIT(11)
#define ENETC_PM0_CMD_PHY_TX_EN BIT(15)
@@ -239,6 +243,17 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM_IMDIO_BASE 0x8030
+#define ENETC_PM0_PAUSE_QUANTA 0x8054
+#define ENETC_PM0_PAUSE_THRESH 0x8064
+#define ENETC_PM1_PAUSE_QUANTA 0x9054
+#define ENETC_PM1_PAUSE_THRESH 0x9064
+
+#define ENETC_PM0_SINGLE_STEP 0x80c0
+#define ENETC_PM1_SINGLE_STEP 0x90c0
+#define ENETC_PM0_SINGLE_STEP_CH BIT(7)
+#define ENETC_PM0_SINGLE_STEP_EN BIT(31)
+#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8)
+
#define ENETC_PM0_IF_MODE 0x8300
#define ENETC_PM0_IFM_RG BIT(2)
#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
@@ -548,6 +563,7 @@ static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
/* Extension flags */
#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
+#define ENETC_TXBD_E_FLAGS_ONE_STEP_PTP BIT(1)
#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
union enetc_rx_bd {
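
The new ENETC_PM0/PM1_SINGLE_STEP definitions pack an enable bit, a CH bit and a correction-field offset into a single register value. A small sketch of how such a value could be composed; the offset and the decision to set CH are placeholders here, not the values the driver actually programs:

/* Hypothetical example: compose a one-step PTP control word from the
 * macros above. 'offset' stands for the byte offset of the PTP
 * correction field within the frame.
 */
static u32 example_single_step_val(u8 offset, bool set_ch)
{
	u32 val = ENETC_PM0_SINGLE_STEP_EN |
		  ENETC_SET_SINGLE_STEP_OFFSET(offset);

	if (set_ch)
		val |= ENETC_PM0_SINGLE_STEP_CH;

	return val;
}
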
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
new file mode 100644
index 000000000000..8b356c485507
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2021 NXP Semiconductors
+ *
+ * The Integrated Endpoint Register Block (IERB) is configured by pre-boot
+ * software and is supposed to be to ENETC what an NVRAM is to a 'real' PCIe
+ * card. Upon FLR, values from the IERB are transferred to the ENETC PFs, and
+ * are read-only in the PF memory space.
+ *
+ * This driver fixes up the power-on reset values for the ENETC shared FIFO,
+ * such that the TX and RX allocations are sufficient for jumbo frames, and
+ * that intelligent FIFO dropping is enabled before the internal data
+ * structures are corrupted.
+ *
+ * Even though not all ports might be used on a given board, we are not
+ * concerned with partitioning the FIFO, because the default values configure
+ * no strict reservations, so the entire FIFO can be used by the RX of a single
+ * port, or the TX of a single port.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "enetc.h"
+#include "enetc_ierb.h"
+
+/* IERB registers */
+#define ENETC_IERB_TXMBAR(port) (((port) * 0x100) + 0x8080)
+#define ENETC_IERB_RXMBER(port) (((port) * 0x100) + 0x8090)
+#define ENETC_IERB_RXMBLR(port) (((port) * 0x100) + 0x8094)
+#define ENETC_IERB_RXBCR(port) (((port) * 0x100) + 0x80a0)
+#define ENETC_IERB_TXBCR(port) (((port) * 0x100) + 0x80a8)
+#define ENETC_IERB_FMBDTR 0xa000
+
+#define ENETC_RESERVED_FOR_ICM 1024
+
+struct enetc_ierb {
+ void __iomem *regs;
+};
+
+static void enetc_ierb_write(struct enetc_ierb *ierb, u32 offset, u32 val)
+{
+ iowrite32(val, ierb->regs + offset);
+}
+
+int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev)
+{
+ struct enetc_ierb *ierb = platform_get_drvdata(pdev);
+ int port = enetc_pf_to_port(pf_pdev);
+ u16 tx_credit, rx_credit, tx_alloc;
+
+ if (port < 0)
+ return -ENODEV;
+
+ if (!ierb)
+ return -EPROBE_DEFER;
+
+ /* By default, it is recommended to set the Host Transfer Agent
+ * per port transmit byte credit to "1000 + max_frame_size/2".
+ * The power-on reset value (1800 bytes) is rounded up to the nearest
+ * 100 assuming a maximum frame size of 1536 bytes.
+ */
+ tx_credit = roundup(1000 + ENETC_MAC_MAXFRM_SIZE / 2, 100);
+
+ /* Internal memory allocated for transmit buffering is guaranteed but
+ * not reserved; i.e. if the total transmit allocation is not used,
+ * then the unused portion is not left idle: it can be used for receive
+ * buffering, but it will be reclaimed from receive, if required, by
+ * intelligently dropping already stored receive frames in the internal
+ * memory to ensure that the transmit allocation is respected.
+ *
+ * PaTXMBAR must be set to a value larger than
+ * PaTXBCR + 2 * max_frame_size + 32
+ * if frame preemption is not enabled, or to
+ * 2 * PaTXBCR + 2 * p_max_frame_size (pMAC maximum frame size) +
+ * 2 * np_max_frame_size (eMAC maximum frame size) + 64
+ * if frame preemption is enabled.
+ */
+ tx_alloc = roundup(2 * tx_credit + 4 * ENETC_MAC_MAXFRM_SIZE + 64, 16);
+
+ /* Initial credits, in units of 8 bytes, to the Ingress Congestion
+ * Manager for the maximum amount of bytes the port is allocated for
+ * pending traffic.
+ * It is recommended to set the initial credits to 2 times the maximum
+ * frame size (2 frames of maximum size).
+ */
+ rx_credit = DIV_ROUND_UP(ENETC_MAC_MAXFRM_SIZE * 2, 8);
+
+ enetc_ierb_write(ierb, ENETC_IERB_TXBCR(port), tx_credit);
+ enetc_ierb_write(ierb, ENETC_IERB_TXMBAR(port), tx_alloc);
+ enetc_ierb_write(ierb, ENETC_IERB_RXBCR(port), rx_credit);
+
+ return 0;
+}
+EXPORT_SYMBOL(enetc_ierb_register_pf);
+
+static int enetc_ierb_probe(struct platform_device *pdev)
+{
+ struct enetc_ierb *ierb;
+ struct resource *res;
+ void __iomem *regs;
+
+ ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL);
+ if (!ierb)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ ierb->regs = regs;
+
+ /* Free buffer depletion threshold in bytes.
+ * This sets the minimum amount of free buffer memory that should be
+ * maintained in the datapath sub system, and when the amount of free
+ * buffer memory falls below this threshold, a depletion indication is
+ * asserted, which may trigger "intelligent drop" frame releases from
+ * the ingress queues in the ICM.
+ * It is recommended to set the free buffer depletion threshold to 1024
+ * bytes, since the ICM needs some FIFO memory for its own use.
+ */
+ enetc_ierb_write(ierb, ENETC_IERB_FMBDTR, ENETC_RESERVED_FOR_ICM);
+
+ platform_set_drvdata(pdev, ierb);
+
+ return 0;
+}
+
+static int enetc_ierb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id enetc_ierb_match[] = {
+ { .compatible = "fsl,ls1028a-enetc-ierb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, enetc_ierb_match);
+
+static struct platform_driver enetc_ierb_driver = {
+ .driver = {
+ .name = "fsl-enetc-ierb",
+ .of_match_table = enetc_ierb_match,
+ },
+ .probe = enetc_ierb_probe,
+ .remove = enetc_ierb_remove,
+};
+
+module_platform_driver(enetc_ierb_driver);
+
+MODULE_DESCRIPTION("NXP ENETC IERB");
+MODULE_LICENSE("Dual BSD/GPL");
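
For a sanity check of the credit arithmetic in enetc_ierb_register_pf(), the following standalone sketch reproduces the three computations; the 9600-byte maximum frame size is an assumption made for illustration and stands in for ENETC_MAC_MAXFRM_SIZE from enetc_hw.h:

#include <stdio.h>

#define MAX_FRM		9600	/* assumed stand-in for ENETC_MAC_MAXFRM_SIZE */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Same formulas as enetc_ierb_register_pf() */
	unsigned int tx_credit = ROUNDUP(1000 + MAX_FRM / 2, 100);
	unsigned int tx_alloc = ROUNDUP(2 * tx_credit + 4 * MAX_FRM + 64, 16);
	unsigned int rx_credit = DIV_ROUND_UP(2 * MAX_FRM, 8);

	/* With a 9600-byte frame this prints:
	 * tx_credit=5800 tx_alloc=50064 rx_credit=2400
	 * (rx_credit is in units of 8 bytes).
	 */
	printf("tx_credit=%u tx_alloc=%u rx_credit=%u\n",
	       tx_credit, tx_alloc, rx_credit);
	return 0;
}
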
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
new file mode 100644
index 000000000000..b3b774e0998a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2021 NXP Semiconductors */
+
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#if IS_ENABLED(CONFIG_FSL_ENETC_IERB)
+
+int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev);
+
+#else
+
+static inline int enetc_ierb_register_pf(struct platform_device *pdev,
+ struct pci_dev *pf_pdev)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 224fc37a6757..31274325159a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -4,8 +4,10 @@
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/fsl/enetc_mdio.h>
+#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include "enetc_ierb.h"
#include "enetc_pf.h"
#define ENETC_DRV_NAME_STR "ENETC PF driver"
@@ -129,16 +131,20 @@ static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
}
static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type,
- u32 *hash)
+ unsigned long hash)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
if (type == UC) {
- enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash);
- enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1));
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err),
+ lower_32_bits(hash));
+ enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx),
+ upper_32_bits(hash));
} else { /* MC */
- enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash);
- enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1));
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err),
+ lower_32_bits(hash));
+ enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx),
+ upper_32_bits(hash));
}
}
@@ -182,7 +188,7 @@ static void enetc_sync_mac_filters(struct enetc_pf *pf)
if (i == UC)
enetc_clear_mac_flt_entry(si, pos);
- enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table);
+ enetc_set_mac_ht_flt(si, 0, i, *f->mac_hash_table);
}
}
@@ -248,10 +254,10 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
}
static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- u32 *hash)
+ unsigned long hash)
{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash);
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1));
+ enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
}
static int enetc_vid_hash_idx(unsigned int vid)
@@ -279,7 +285,7 @@ static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
}
}
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter);
+ enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
}
static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
@@ -386,23 +392,54 @@ static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en)
return 0;
}
-static void enetc_port_setup_primary_mac_address(struct enetc_si *si)
+static int enetc_setup_mac_address(struct device_node *np, struct enetc_pf *pf,
+ int si)
{
- unsigned char mac_addr[MAX_ADDR_LEN];
- struct enetc_pf *pf = enetc_si_priv(si);
- struct enetc_hw *hw = &si->hw;
- int i;
+ struct device *dev = &pf->si->pdev->dev;
+ struct enetc_hw *hw = &pf->si->hw;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int err;
- /* check MAC addresses for PF and all VFs, if any is 0 set it ro rand */
- for (i = 0; i < pf->total_vfs + 1; i++) {
- enetc_pf_get_primary_mac_addr(hw, i, mac_addr);
- if (!is_zero_ether_addr(mac_addr))
- continue;
+ /* (1) try to get the MAC address from the device tree */
+ if (np) {
+ err = of_get_mac_address(np, mac_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
+ }
+
+ /* (2) bootloader supplied MAC address */
+ if (is_zero_ether_addr(mac_addr))
+ enetc_pf_get_primary_mac_addr(hw, si, mac_addr);
+
+ /* (3) choose a random one */
+ if (is_zero_ether_addr(mac_addr)) {
eth_random_addr(mac_addr);
- dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n",
- i, mac_addr);
- enetc_pf_set_primary_mac_addr(hw, i, mac_addr);
+ dev_info(dev, "no MAC address specified for SI%d, using %pM\n",
+ si, mac_addr);
+ }
+
+ enetc_pf_set_primary_mac_addr(hw, si, mac_addr);
+
+ return 0;
+}
+
+static int enetc_setup_mac_addresses(struct device_node *np,
+ struct enetc_pf *pf)
+{
+ int err, i;
+
+ /* The PF might take its MAC from the device tree */
+ err = enetc_setup_mac_address(np, pf, 0);
+ if (err)
+ return err;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ err = enetc_setup_mac_address(NULL, pf, i + 1);
+ if (err)
+ return err;
}
+
+ return 0;
}
static void enetc_port_assign_rfs_entries(struct enetc_si *si)
@@ -483,7 +520,6 @@ static void enetc_configure_port_mac(struct enetc_hw *hw)
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
- enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -558,9 +594,6 @@ static void enetc_configure_port(struct enetc_pf *pf)
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
- /* fix-up primary MAC addresses, if not set already */
- enetc_port_setup_primary_mac_address(pf->si);
-
/* enforce VLAN promisc mode for all SIs */
pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL;
enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap);
@@ -703,6 +736,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_features = enetc_pf_set_features,
.ndo_do_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_setup_tc,
+ .ndo_bpf = enetc_setup_bpf,
+ .ndo_xdp_xmit = enetc_xdp_xmit,
};
static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -979,7 +1014,12 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
int duplex, bool tx_pause, bool rx_pause)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ u32 pause_off_thresh = 0, pause_on_thresh = 0;
+ u32 init_quanta = 0, refresh_quanta = 0;
+ struct enetc_hw *hw = &pf->si->hw;
struct enetc_ndev_priv *priv;
+ u32 rbmr, cmd_cfg;
+ int idx;
priv = netdev_priv(pf->si->ndev);
if (priv->active_offloads & ENETC_F_QBV)
@@ -987,9 +1027,60 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
if (!phylink_autoneg_inband(mode) &&
phy_interface_mode_is_rgmii(interface))
- enetc_force_rgmii_mac(&pf->si->hw, speed, duplex);
+ enetc_force_rgmii_mac(hw, speed, duplex);
+
+ /* Flow control */
+ for (idx = 0; idx < priv->num_rx_rings; idx++) {
+ rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+
+ if (tx_pause)
+ rbmr |= ENETC_RBMR_CM;
+ else
+ rbmr &= ~ENETC_RBMR_CM;
- enetc_mac_enable(&pf->si->hw, true);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+ }
+
+ if (tx_pause) {
+ /* When the port first enters congestion, send a PAUSE request
+ * with the maximum number of quanta. When the port exits
+ * congestion, it will automatically send a PAUSE frame with
+ * zero quanta.
+ */
+ init_quanta = 0xffff;
+
+ /* Also, set up the refresh timer to send follow-up PAUSE
+ * frames at half the quanta value, in case the congestion
+ * condition persists.
+ */
+ refresh_quanta = 0xffff / 2;
+
+ /* Start emitting PAUSE frames when 3 large frames (or more
+ * smaller frames) have accumulated in the FIFO waiting to be
+ * DMAed to the RX ring.
+ */
+ pause_on_thresh = 3 * ENETC_MAC_MAXFRM_SIZE;
+ pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
+ }
+
+ enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta);
+ enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta);
+ enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
+ enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta);
+ enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
+ enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);
+
+ cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+
+ if (rx_pause)
+ cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
+ else
+ cmd_cfg |= ENETC_PM0_PAUSE_IGN;
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg);
+
+ enetc_mac_enable(hw, true);
}
static void enetc_pl_mac_link_down(struct phylink_config *config,
@@ -1081,24 +1172,28 @@ static int enetc_init_port_rss_memory(struct enetc_si *si)
return err;
}
-static void enetc_init_unused_port(struct enetc_si *si)
+static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
- struct device *dev = &si->pdev->dev;
- struct enetc_hw *hw = &si->hw;
- int err;
+ struct device_node *node = pdev->dev.of_node;
+ struct platform_device *ierb_pdev;
+ struct device_node *ierb_node;
- si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
- err = enetc_alloc_cbdr(dev, &si->cbd_ring);
- if (err)
- return;
+ /* Don't register with the IERB if the PF itself is disabled */
+ if (!node || !of_device_is_available(node))
+ return 0;
+
+ ierb_node = of_find_compatible_node(NULL, NULL,
+ "fsl,ls1028a-enetc-ierb");
+ if (!ierb_node || !of_device_is_available(ierb_node))
+ return -ENODEV;
- enetc_setup_cbdr(hw, &si->cbd_ring);
+ ierb_pdev = of_find_device_by_node(ierb_node);
+ of_node_put(ierb_node);
- enetc_init_port_rfs_memory(si);
- enetc_init_port_rss_memory(si);
+ if (!ierb_pdev)
+ return -EPROBE_DEFER;
- enetc_clear_cbdr(hw);
- enetc_free_cbdr(dev, &si->cbd_ring);
+ return enetc_ierb_register_pf(ierb_pdev, pdev);
}
static int enetc_pf_probe(struct pci_dev *pdev,
@@ -1111,6 +1206,14 @@ static int enetc_pf_probe(struct pci_dev *pdev,
struct enetc_pf *pf;
int err;
+ err = enetc_pf_register_with_ierb(pdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (err)
+ dev_warn(&pdev->dev,
+ "Could not register with IERB driver: %pe, please update the device tree\n",
+ ERR_PTR(err));
+
err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
if (err) {
dev_err(&pdev->dev, "PCI probing failed\n");
@@ -1124,8 +1227,24 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_map_pf_space;
}
+ err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
+ &si->cbd_ring);
+ if (err)
+ goto err_setup_cbdr;
+
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
if (node && !of_device_is_available(node)) {
- enetc_init_unused_port(si);
dev_info(&pdev->dev, "device is disabled, skipping\n");
err = -ENODEV;
goto err_device_disabled;
@@ -1135,6 +1254,10 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf->si = si;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
+ err = enetc_setup_mac_addresses(node, pf);
+ if (err)
+ goto err_setup_mac_addresses;
+
enetc_configure_port(pf);
enetc_get_si_caps(si);
@@ -1158,18 +1281,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
- err = enetc_init_port_rfs_memory(si);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
- goto err_init_port_rfs;
- }
-
- err = enetc_init_port_rss_memory(si);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
- goto err_init_port_rss;
- }
-
err = enetc_configure_si(priv);
if (err) {
dev_err(&pdev->dev, "Failed to configure SI\n");
@@ -1205,15 +1316,18 @@ err_phylink_create:
err_mdiobus_create:
enetc_free_msix(priv);
err_config_si:
-err_init_port_rss:
-err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
+err_init_port_rss:
+err_init_port_rfs:
err_device_disabled:
+err_setup_mac_addresses:
+ enetc_teardown_cbdr(&si->cbd_ring);
+err_setup_cbdr:
err_map_pf_space:
enetc_pci_remove(pdev);
@@ -1239,6 +1353,7 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_free_msix(priv);
enetc_free_si_resources(priv);
+ enetc_teardown_cbdr(&si->cbd_ring);
free_netdev(si->ndev);
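
To make the flow-control programming added to enetc_pl_mac_link_up() above easier to follow, here is a sketch of the values that end up in the MAC when tx_pause is requested, again assuming a 9600-byte ENETC_MAC_MAXFRM_SIZE for the sake of concrete numbers:

/* Illustration only: the PAUSE-related values derived when tx_pause is on. */
static void example_pause_values(void)
{
	unsigned int max_frm = 9600;		  /* assumed ENETC_MAC_MAXFRM_SIZE */
	unsigned int init_quanta = 0xffff;	  /* first PAUSE uses max quanta */
	unsigned int refresh_quanta = 0xffff / 2; /* follow-up PAUSE cadence */
	unsigned int pause_on = 3 * max_frm;	  /* 28800: assert at ~3 frames queued */
	unsigned int pause_off = 1 * max_frm;	  /* 9600: deassert below ~1 frame */

	/* These land in ENETC_PM0/PM1_PAUSE_QUANTA, ENETC_PM0/PM1_PAUSE_THRESH,
	 * ENETC_PPAUONTR and ENETC_PPAUOFFTR respectively; with rx_pause the
	 * ENETC_PM0_PAUSE_IGN bit is cleared so received PAUSE frames are
	 * honoured.
	 */
	(void)init_quanta; (void)refresh_quanta; (void)pause_on; (void)pause_off;
}
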
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index a9aee219fb58..af699f2ad095 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -455,11 +455,6 @@ static struct enetc_psfp epsfp = {
static LIST_HEAD(enetc_block_cb_list);
-static inline int enetc_get_port(struct enetc_ndev_priv *priv)
-{
- return priv->si->pdev->devfn & 0x7;
-}
-
/* Stream Identity Entry Set Descriptor */
static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct enetc_streamid *sid,
@@ -504,7 +499,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf = &cbd.sid_set;
/* Only one port supported for one entry, set itself */
- si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
+ si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
si_conf->id_type = 1;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -529,7 +524,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf->en = 0x80;
si_conf->stream_handle = cpu_to_le32(sid->handle);
- si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
+ si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
si_conf->id_type = sid->filtertype;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -591,7 +586,8 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
}
sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
- sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv));
+ sfi_config->input_ports =
+ cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
/* The priority value which may be matched against the
* frame’s priority value to determine a match for this entry.
@@ -1221,6 +1217,11 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
/* Flow meter and max frame size */
if (entryp) {
+ if (entryp->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
+ err = -EOPNOTSUPP;
+ goto free_sfi;
+ }
if (entryp->police.burst) {
fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
if (!fmi) {
@@ -1557,10 +1558,10 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
switch (f->command) {
case FLOW_BLOCK_BIND:
- set_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ set_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
break;
case FLOW_BLOCK_UNBIND:
- clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap);
+ clear_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
if (!epsfp.dev_bitmap)
clean_psfp_all();
break;
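
A note on the enetc_get_port() removal above: the old helper derived the port number from devfn & 0x7, which is wrong for the fourth ENETC port on LS1028A, whose PF sits at PCIe function 6. The replacement enetc_pf_to_port(), added in enetc.h earlier in this patch, encodes the mapping explicitly; a minimal sketch of the difference:

/* Sketch: old devfn-based lookup vs. the new explicit mapping.
 * For the PF at devfn 6, the old formula returned 6, while
 * enetc_pf_to_port() returns port 3.
 */
static int old_port_lookup(struct pci_dev *pf_pdev)
{
	return pf_pdev->devfn & 0x7;		/* breaks for devfn 6 */
}

static int new_port_lookup(struct pci_dev *pf_pdev)
{
	return enetc_pf_to_port(pf_pdev);	/* 0, 1, 2 or 3; -1 if unknown */
}
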
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 9b755a84c2d6..03090ba7e226 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -165,6 +165,11 @@ static int enetc_vf_probe(struct pci_dev *pdev,
enetc_init_si_rings_params(priv);
+ err = enetc_setup_cbdr(priv->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
+ &si->cbd_ring);
+ if (err)
+ goto err_setup_cbdr;
+
err = enetc_alloc_si_resources(priv);
if (err) {
dev_err(&pdev->dev, "SI resource alloc failed\n");
@@ -197,6 +202,8 @@ err_config_si:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
+ enetc_teardown_cbdr(&si->cbd_ring);
+err_setup_cbdr:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
@@ -216,6 +223,7 @@ static void enetc_vf_remove(struct pci_dev *pdev)
enetc_free_msix(priv);
enetc_free_si_resources(priv);
+ enetc_teardown_cbdr(&si->cbd_ring);
free_netdev(si->ndev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3db882322b2b..f2065f9d02e6 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -38,6 +38,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
+#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -1665,6 +1666,7 @@ static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned char *iap, tmpaddr[ETH_ALEN];
+ int ret;
/*
* try to get mac address in following order:
@@ -1680,9 +1682,9 @@ static void fec_get_mac(struct net_device *ndev)
if (!is_valid_ether_addr(iap)) {
struct device_node *np = fep->pdev->dev.of_node;
if (np) {
- const char *mac = of_get_mac_address(np);
- if (!IS_ERR(mac))
- iap = (unsigned char *) mac;
+ ret = of_get_mac_address(np, tmpaddr);
+ if (!ret)
+ iap = tmpaddr;
}
}
@@ -2048,6 +2050,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
+ phy_dev->mac_managed_pm = 1;
+
phy_attached_info(phy_dev);
return 0;
@@ -2479,6 +2483,9 @@ static void fec_enet_get_strings(struct net_device *netdev,
memcpy(data + i * ETH_GSTRING_LEN,
fec_stats[i].name, ETH_GSTRING_LEN);
break;
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
}
}
@@ -2487,6 +2494,8 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(fec_stats);
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
default:
return -EOPNOTSUPP;
}
@@ -2738,6 +2747,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.set_wol = fec_enet_set_wol,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .self_test = net_selftest,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -3864,6 +3874,7 @@ static int __maybe_unused fec_resume(struct device *dev)
netif_device_attach(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
+ phy_init_hw(ndev->phydev);
phy_start(ndev->phydev);
}
rtnl_unlock();
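
The fec_main.c hunk above is one of several conversions in this series to the new of_get_mac_address() calling convention, which fills a caller-provided buffer and returns 0 or a negative error (including -EPROBE_DEFER, which enetc_setup_mac_address() earlier in the series propagates). A condensed before/after sketch; the surrounding driver context is illustrative:

/* Old style (pointer return, checked with IS_ERR):
 *
 *	const char *mac = of_get_mac_address(np);
 *	if (!IS_ERR(mac))
 *		ether_addr_copy(ndev->dev_addr, mac);
 *
 * New style (caller-provided buffer, integer return):
 */
static void example_set_mac(struct device_node *np, struct net_device *ndev)
{
	u8 addr[ETH_ALEN];

	if (!of_get_mac_address(np, addr))
		ether_addr_copy(ndev->dev_addr, addr);
	else
		eth_hw_addr_random(ndev);	/* fallback, as gianfar does */
}
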
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b3bad429e03b..02c47658a215 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -813,7 +813,6 @@ static int mpc52xx_fec_probe(struct platform_device *op)
const u32 *prop;
int prop_size;
struct device_node *np = op->dev.of_node;
- const char *mac_addr;
phys_addr_t rx_fifo;
phys_addr_t tx_fifo;
@@ -891,10 +890,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
*
* First try to read MAC address from DT
*/
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(ndev->dev_addr, mac_addr);
- } else {
+ rv = of_get_mac_address(np, ndev->dev_addr);
+ if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
/*
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 901749a7a318..46ecb42f2ef8 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -605,7 +605,6 @@ static int mac_probe(struct platform_device *_of_dev)
struct platform_device *of_dev;
struct resource res;
struct mac_priv_s *priv;
- const u8 *mac_addr;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
@@ -723,11 +722,9 @@ static int mac_probe(struct platform_device *_of_dev)
priv->cell_index = (u8)val;
/* Get the MAC address */
- mac_addr = of_get_mac_address(mac_node);
- if (IS_ERR(mac_addr))
+ err = of_get_mac_address(mac_node, mac_dev->addr);
+ if (err)
dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
- else
- ether_addr_copy(mac_dev->addr, mac_addr);
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
@@ -853,7 +850,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (err < 0)
dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
- if (!IS_ERR(mac_addr))
+ if (!is_zero_ether_addr(mac_dev->addr))
dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 78e008b81374..6ee325ad35c5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -918,7 +918,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
const u32 *data;
struct clk *clk;
int err;
- const u8 *mac_addr;
const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
@@ -1006,9 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- mac_addr = of_get_mac_address(ofdev->dev.of_node);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(ndev->dev_addr, mac_addr);
+ of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3ec4d9fddd52..f2945abdb041 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -175,10 +175,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
/* Program the RIR0 reg with the required distribution */
- if (priv->poll_mode == GFAR_SQ_POLLING)
- gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
- else /* GFAR_MQ_POLLING */
- gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
+ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
}
/* Restore PROMISC mode */
@@ -521,29 +518,9 @@ static int gfar_parse_group(struct device_node *np,
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
- u32 rxq_mask, txq_mask;
- int ret;
-
+ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
-
- ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
- if (!ret) {
- grp->rx_bit_map = rxq_mask ?
- rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
- }
-
- ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
- if (!ret) {
- grp->tx_bit_map = txq_mask ?
- txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
- }
-
- if (priv->poll_mode == GFAR_SQ_POLLING) {
- /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
- grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
- grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
- }
} else {
grp->rx_bit_map = 0xFF;
grp->tx_bit_map = 0xFF;
@@ -640,7 +617,6 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
const char *model;
- const void *mac_addr;
int err = 0, i;
phy_interface_t interface;
struct net_device *dev = NULL;
@@ -650,18 +626,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
u32 stash_len = 0;
u32 stash_idx = 0;
unsigned int num_tx_qs, num_rx_qs;
- unsigned short mode, poll_mode;
+ unsigned short mode;
if (!np)
return -ENODEV;
- if (of_device_is_compatible(np, "fsl,etsec2")) {
+ if (of_device_is_compatible(np, "fsl,etsec2"))
mode = MQ_MG_MODE;
- poll_mode = GFAR_SQ_POLLING;
- } else {
+ else
mode = SQ_SG_MODE;
- poll_mode = GFAR_SQ_POLLING;
- }
if (mode == SQ_SG_MODE) {
num_tx_qs = 1;
@@ -677,22 +650,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -EINVAL;
}
- if (poll_mode == GFAR_SQ_POLLING) {
- num_tx_qs = num_grps; /* one txq per int group */
- num_rx_qs = num_grps; /* one rxq per int group */
- } else { /* GFAR_MQ_POLLING */
- u32 tx_queues, rx_queues;
- int ret;
-
- /* parse the num of HW tx and rx queues */
- ret = of_property_read_u32(np, "fsl,num_tx_queues",
- &tx_queues);
- num_tx_qs = ret ? 1 : tx_queues;
-
- ret = of_property_read_u32(np, "fsl,num_rx_queues",
- &rx_queues);
- num_rx_qs = ret ? 1 : rx_queues;
- }
+ num_tx_qs = num_grps; /* one txq per int group */
+ num_rx_qs = num_grps; /* one rxq per int group */
}
if (num_tx_qs > MAX_TX_QS) {
@@ -718,7 +677,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->ndev = dev;
priv->mode = mode;
- priv->poll_mode = poll_mode;
priv->num_tx_queues = num_tx_qs;
netif_set_real_num_rx_queues(dev, num_rx_qs);
@@ -782,11 +740,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (stash_len || stash_idx)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
- mac_addr = of_get_mac_address(np);
-
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(dev->dev_addr, mac_addr);
- } else {
+ err = of_get_mac_address(np, dev->dev_addr);
+ if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
}
@@ -2695,106 +2650,6 @@ static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
return 0;
}
-static int gfar_poll_rx(struct napi_struct *napi, int budget)
-{
- struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi_rx);
- struct gfar_private *priv = gfargrp->priv;
- struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_rx_q *rx_queue = NULL;
- int work_done = 0, work_done_per_q = 0;
- int i, budget_per_q = 0;
- unsigned long rstat_rxf;
- int num_act_queues;
-
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived
- */
- gfar_write(&regs->ievent, IEVENT_RX_MASK);
-
- rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
-
- num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
- if (num_act_queues)
- budget_per_q = budget/num_act_queues;
-
- for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
- /* skip queue if not active */
- if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
- continue;
-
- rx_queue = priv->rx_queue[i];
- work_done_per_q =
- gfar_clean_rx_ring(rx_queue, budget_per_q);
- work_done += work_done_per_q;
-
- /* finished processing this queue */
- if (work_done_per_q < budget_per_q) {
- /* clear active queue hw indication */
- gfar_write(&regs->rstat,
- RSTAT_CLEAR_RXF0 >> i);
- num_act_queues--;
-
- if (!num_act_queues)
- break;
- }
- }
-
- if (!num_act_queues) {
- u32 imask;
- napi_complete_done(napi, work_done);
-
- /* Clear the halt bit in RSTAT */
- gfar_write(&regs->rstat, gfargrp->rstat);
-
- spin_lock_irq(&gfargrp->grplock);
- imask = gfar_read(&regs->imask);
- imask |= IMASK_RX_DEFAULT;
- gfar_write(&regs->imask, imask);
- spin_unlock_irq(&gfargrp->grplock);
- }
-
- return work_done;
-}
-
-static int gfar_poll_tx(struct napi_struct *napi, int budget)
-{
- struct gfar_priv_grp *gfargrp =
- container_of(napi, struct gfar_priv_grp, napi_tx);
- struct gfar_private *priv = gfargrp->priv;
- struct gfar __iomem *regs = gfargrp->regs;
- struct gfar_priv_tx_q *tx_queue = NULL;
- int has_tx_work = 0;
- int i;
-
- /* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived
- */
- gfar_write(&regs->ievent, IEVENT_TX_MASK);
-
- for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
- tx_queue = priv->tx_queue[i];
- /* run Tx cleanup to completion */
- if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
- gfar_clean_tx_ring(tx_queue);
- has_tx_work = 1;
- }
- }
-
- if (!has_tx_work) {
- u32 imask;
- napi_complete(napi);
-
- spin_lock_irq(&gfargrp->grplock);
- imask = gfar_read(&regs->imask);
- imask |= IMASK_TX_DEFAULT;
- gfar_write(&regs->imask, imask);
- spin_unlock_irq(&gfargrp->grplock);
- }
-
- return 0;
-}
-
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
@@ -3352,17 +3207,10 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
- if (priv->poll_mode == GFAR_SQ_POLLING) {
- netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx_sq, 2);
- } else {
- netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx, GFAR_DEV_WEIGHT);
- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx, 2);
- }
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+ netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 8ced783f5302..5ea47df93e5e 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -909,22 +909,6 @@ enum {
MQ_MG_MODE
};
-/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
- * The driver supports a single pair of RX/Tx queues
- * per interrupt group (Rx/Tx int line). MQ_MG mode
- * devices have 2 interrupt groups, so the device will
- * have a total of 2 Tx and 2 Rx queues in this case.
- * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
- * The driver supports all the 8 Rx and Tx HW queues
- * each queue mapped by the Device Tree to one of
- * the 2 interrupt groups. This mode implies significant
- * processing overhead (CPU and controller level).
- */
-enum gfar_poll_mode {
- GFAR_SQ_POLLING = 0,
- GFAR_MQ_POLLING
-};
-
/*
* Per TX queue stats
*/
@@ -1105,7 +1089,6 @@ struct gfar_private {
unsigned long state;
unsigned short mode;
- unsigned short poll_mode;
unsigned int num_tx_queues;
unsigned int num_rx_queues;
unsigned int num_grps;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ef4e2febeb5b..e0936510fa34 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3562,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct resource res;
int err, ucc_num, max_speed = 0;
const unsigned int *prop;
- const void *mac_addr;
phy_interface_t phy_interface;
static const int enet_to_speed[] = {
SPEED_10, SPEED_10, SPEED_10,
@@ -3733,9 +3732,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
goto err_free_netdev;
}
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(dev->dev_addr, mac_addr);
+ of_get_mac_address(np, dev->dev_addr);
ugeth->ug_info = ug_info;
ugeth->dev = device;