Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 57
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 6
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 24
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 9
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 27
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 8
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 3
-rw-r--r-- drivers/net/ethernet/fungible/funcore/fun_dev.c | 7
-rw-r--r-- drivers/net/ethernet/google/gve/gve.h | 112
-rw-r--r-- drivers/net/ethernet/google/gve/gve_adminq.c | 8
-rw-r--r-- drivers/net/ethernet/google/gve/gve_adminq.h | 4
-rw-r--r-- drivers/net/ethernet/google/gve/gve_ethtool.c | 91
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 719
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx.c | 147
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx.c | 298
-rw-r--r-- drivers/net/ethernet/google/gve/gve_utils.c | 6
-rw-r--r-- drivers/net/ethernet/google/gve/gve_utils.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 17
-rw-r--r-- drivers/net/ethernet/intel/Makefile | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 1
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 88
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_trace.h | 20
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 420
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 20
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 77
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.h | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 249
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_mbx.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 49
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.h | 8
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 11
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 29
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 3
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_hw.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_i225.c | 19
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 52
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_regs.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_tsn.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ixgb/Makefile | 9
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb.h | 179
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ee.c | 580
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ee.h | 79
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 642
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 1229
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_hw.h | 767
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ids.h | 23
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2285
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_osdep.h | 39
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_param.c | 442
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 24
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/mediatek/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_path.c | 14
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 192
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 118
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.c | 114
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe.h | 25
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 9
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 8
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 14
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_sgmii.c | 207
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 157
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 72
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 528
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 78
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 89
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 151
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/thermal.c | 108
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/thermal.h | 20
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 76
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 43
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_police.c | 13
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c | 20
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_regs.h | 36
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c | 221
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c | 1402
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c | 133
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c | 192
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c | 209
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c | 270
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h | 6
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_ag_api.h | 217
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api.c | 61
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_client.h | 11
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c | 4
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 2
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 62
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 52
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 97
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_net.c | 50
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 30
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 260
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.h | 32
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 2
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 2
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.c | 2
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 1
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 187
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 15
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 4
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 1
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ef100.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.c | 9
-rw-r--r-- drivers/net/ethernet/sfc/mae.c | 16
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.h | 5
-rw-r--r-- drivers/net/ethernet/sfc/siena/efx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/tc.c | 42
-rw-r--r-- drivers/net/ethernet/sfc/tc.h | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2
-rw-r--r-- drivers/net/ethernet/sunplus/spl2sw_phy.c | 4
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 52
-rw-r--r-- drivers/net/ethernet/ti/am65-cpts.c | 34
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 21
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.h | 1
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_type.h | 7
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 9
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 1
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 10
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 1
205 files changed, 6981 insertions, 8595 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 1917da784191..5a274b99f299 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -84,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -189,6 +188,7 @@ source "drivers/net/ethernet/toshiba/Kconfig"
source "drivers/net/ethernet/tundra/Kconfig"
source "drivers/net/ethernet/vertexcom/Kconfig"
source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 306393f8eeca..49bb9a8f00e6 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -39,7 +39,6 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -1745,7 +1744,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_pci_disable;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
if (!pdev->pm_cap) {
@@ -1879,7 +1877,6 @@ out_free_netdev:
free_netdev(netdev);
out_pci_release:
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
@@ -1897,7 +1894,6 @@ static void alx_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
mutex_destroy(&alx->mtx);
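
This AER cleanup recurs across the series (alx here, then bnx2, bnx2x, bnxt, cxgb4, be2net, and fun_dev below): the explicit pci_enable_pcie_error_reporting()/pci_disable_pcie_error_reporting() calls are dropped, on the understanding (an assumption about the motivating PCI core change, not stated in this diff) that the PCI core now enables AER reporting itself during enumeration when it owns AER natively. A toy model of that ownership shift, not kernel code:

/* Toy model: AER enabling moves from each driver's probe() into the
 * shared PCI core, so drivers stop calling it themselves. */
#include <stdio.h>
#include <stdbool.h>

static bool aer_enabled;

static void pci_core_enumerate(void)
{
	aer_enabled = true;	/* core owns AER setup now */
}

static int driver_probe(void)
{
	/* no pci_enable_pcie_error_reporting() call needed */
	return aer_enabled ? 0 : -1;
}

int main(void)
{
	pci_core_enumerate();
	printf("probe: %d (AER handled by the PCI core)\n", driver_probe());
	return 0;
}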
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 40c781695d58..4a288799633f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -207,16 +207,6 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/**
- * atl1c_irq_reset - reset interrupt confiure on the NIC
- * @adapter: board private structure
- */
-static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
-{
- atomic_set(&adapter->irq_sem, 1);
- atl1c_irq_enable(adapter);
-}
-
/*
* atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
* of the idle status register until the device is actually idle
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9f473854b0f4..466e1d62bcf6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -48,7 +48,6 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/aer.h>
#include <linux/crash_dump.h>
#if IS_ENABLED(CONFIG_CNIC)
@@ -3829,7 +3828,7 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
return 0;
}
-static int
+static void
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
const struct bnx2_mips_fw_file_entry *fw_entry)
{
@@ -3897,48 +3896,34 @@ load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
val &= ~cpu_reg->mode_value_halt;
bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
-
- return 0;
}
-static int
+static void
bnx2_init_cpus(struct bnx2 *bp)
{
const struct bnx2_mips_fw_file *mips_fw =
(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
const struct bnx2_rv2p_fw_file *rv2p_fw =
(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
- int rc;
/* Initialize the RV2P processor. */
load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
/* Initialize the RX Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
/* Initialize the TX Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
/* Initialize the TX Patch-up Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
/* Initialize the Completion Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
/* Initialize the Command Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
-
-init_cpu_err:
- return rc;
+ load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
}
static void
@@ -4951,8 +4936,7 @@ bnx2_init_chip(struct bnx2 *bp)
} else
bnx2_init_context(bp);
- if ((rc = bnx2_init_cpus(bp)) != 0)
- return rc;
+ bnx2_init_cpus(bp);
bnx2_init_nvram(bp);
@@ -8093,7 +8077,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
int rc, i, j;
u32 reg;
u64 dma_mask, persist_dma_mask;
- int err;
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
@@ -8176,12 +8159,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags |= BNX2_FLAG_PCIE;
if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
-
- /* AER (Advanced Error Reporting) hooks */
- err = pci_enable_pcie_error_reporting(pdev);
- if (!err)
- bp->flags |= BNX2_FLAG_AER_ENABLED;
-
} else {
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
if (bp->pcix_cap == 0) {
@@ -8460,11 +8437,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
return 0;
err_out_unmap:
- if (bp->flags & BNX2_FLAG_AER_ENABLED) {
- pci_disable_pcie_error_reporting(pdev);
- bp->flags &= ~BNX2_FLAG_AER_ENABLED;
- }
-
pci_iounmap(pdev, bp->regview);
bp->regview = NULL;
@@ -8638,11 +8610,6 @@ bnx2_remove_one(struct pci_dev *pdev)
bnx2_free_stats_blk(dev);
kfree(bp->temp_stats_blk);
- if (bp->flags & BNX2_FLAG_AER_ENABLED) {
- pci_disable_pcie_error_reporting(pdev);
- bp->flags &= ~BNX2_FLAG_AER_ENABLED;
- }
-
bnx2_release_firmware(bp);
free_netdev(dev);
@@ -8766,9 +8733,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
}
rtnl_unlock();
- if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
- return result;
-
return result;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index a09ec47461c9..315b08c64edd 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6808,7 +6808,6 @@ struct bnx2 {
#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
#define BNX2_FLAG_BROKEN_STATS 0x00002000
-#define BNX2_FLAG_AER_ENABLED 0x00004000
struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dd5945c4bfec..8bcde0a6e011 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1486,7 +1486,6 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define BC_SUPPORTS_RMMOD_CMD (1 << 23)
#define HAS_PHYS_PORT_ID (1 << 24)
-#define AER_ENABLED (1 << 25)
#define PTP_SUPPORTED (1 << 26)
#define TX_TIMESTAMPING_EN (1 << 27)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5d1e4fe335aa..3bb5ea570c87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -13037,14 +13036,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_features_check = bnx2x_features_check,
};
-static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
-{
- if (bp->flags & AER_ENABLED) {
- pci_disable_pcie_error_reporting(bp->pdev);
- bp->flags &= ~AER_ENABLED;
- }
-}
-
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -13157,13 +13148,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* Set PCIe reset type to fundamental for EEH recovery */
pdev->needs_freset = 1;
- /* AER (Advanced Error reporting) configuration */
- rc = pci_enable_pcie_error_reporting(pdev);
- if (!rc)
- bp->flags |= AER_ENABLED;
- else
- BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
-
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
@@ -14020,8 +14004,6 @@ init_one_freemem:
bnx2x_free_mem_bp(bp);
init_one_exit:
- bnx2x_disable_pcie_error_reporting(bp);
-
if (bp->regview)
iounmap(bp->regview);
@@ -14102,7 +14084,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
- bnx2x_disable_pcie_error_reporting(bp);
if (remove_netdev) {
if (bp->regview)
iounmap(bp->regview);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e2e2c986c82b..656a28ac2ff0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -48,7 +48,6 @@
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
-#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
@@ -7770,7 +7769,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
- __bnxt_hwrm_ptp_qcfg(bp);
+ bp->fw_cap |= BNXT_FW_CAP_PTP;
} else {
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
@@ -12299,6 +12298,8 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
bnxt_ethtool_init(bp);
+ if (bp->fw_cap & BNXT_FW_CAP_PTP)
+ __bnxt_hwrm_ptp_qcfg(bp);
bnxt_dcb_init(bp);
return 0;
}
@@ -12705,8 +12706,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- pci_enable_pcie_error_reporting(pdev);
-
INIT_WORK(&bp->sp_task, bnxt_sp_task);
INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
@@ -13186,7 +13185,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_rdma_aux_device_uninit(bp);
bnxt_ptp_clear(bp);
- pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index c0628ac1b798..0eeaed95a4ac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1968,34 +1968,35 @@ struct bnxt {
u32 msg_enable;
- u32 fw_cap;
- #define BNXT_FW_CAP_SHORT_CMD 0x00000001
- #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
- #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
- #define BNXT_FW_CAP_NEW_RM 0x00000008
- #define BNXT_FW_CAP_IF_CHANGE 0x00000010
- #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
- #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
- #define BNXT_FW_CAP_TRUSTED_VF 0x00000800
- #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
- #define BNXT_FW_CAP_PKG_VER 0x00004000
- #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
- #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000
- #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
- #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
- #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA 0x00080000
- #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
- #define BNXT_FW_CAP_HOT_RESET 0x00200000
- #define BNXT_FW_CAP_PTP_RTC 0x00400000
- #define BNXT_FW_CAP_RX_ALL_PKT_TS 0x00800000
- #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
- #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
- #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
- #define BNXT_FW_CAP_LIVEPATCH 0x08000000
- #define BNXT_FW_CAP_PTP_PPS 0x10000000
- #define BNXT_FW_CAP_HOT_RESET_IF 0x20000000
- #define BNXT_FW_CAP_RING_MONITOR 0x40000000
- #define BNXT_FW_CAP_DBG_QCAPS 0x80000000
+ u64 fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD BIT_ULL(0)
+ #define BNXT_FW_CAP_LLDP_AGENT BIT_ULL(1)
+ #define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2)
+ #define BNXT_FW_CAP_NEW_RM BIT_ULL(3)
+ #define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4)
+ #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
+ #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
+ #define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
+ #define BNXT_FW_CAP_PKG_VER BIT_ULL(14)
+ #define BNXT_FW_CAP_CFA_ADV_FLOW BIT_ULL(15)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
+ #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
+ #define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA BIT_ULL(19)
+ #define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
+ #define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
+ #define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
+ #define BNXT_FW_CAP_RX_ALL_PKT_TS BIT_ULL(23)
+ #define BNXT_FW_CAP_VLAN_RX_STRIP BIT_ULL(24)
+ #define BNXT_FW_CAP_VLAN_TX_INSERT BIT_ULL(25)
+ #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED BIT_ULL(26)
+ #define BNXT_FW_CAP_LIVEPATCH BIT_ULL(27)
+ #define BNXT_FW_CAP_PTP_PPS BIT_ULL(28)
+ #define BNXT_FW_CAP_HOT_RESET_IF BIT_ULL(29)
+ #define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30)
+ #define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(31)
+ #define BNXT_FW_CAP_PTP BIT_ULL(32)
u32 fw_dbg_cap;
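
The widening from u32 to u64 matters because the new BNXT_FW_CAP_PTP lands on bit 32: shifting a 32-bit 1 by 32 is undefined behavior in C, while BIT_ULL() expands to 1ULL << n and stays valid up to bit 63. A minimal standalone sketch of the difference (BIT/BIT_ULL redefined locally for illustration):

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)		(1U << (nr))	/* 32-bit: invalid for nr >= 32 */
#define BIT_ULL(nr)	(1ULL << (nr))	/* 64-bit: valid up to nr == 63 */

int main(void)
{
	uint64_t fw_cap = 0;

	fw_cap |= BIT_ULL(32);	/* models BNXT_FW_CAP_PTP */
	printf("bit 32 set: %d\n", (int)((fw_cap >> 32) & 1));
	return 0;
}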
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index ec573127b707..696f32dfe41f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2862,7 +2862,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
if (rc)
return rc;
- buflen = dir_entries * entry_length;
+ buflen = mul_u32_u32(dir_entries, entry_length);
buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
if (!buf) {
hwrm_req_drop(bp, req);
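
The mul_u32_u32() switch guards against a classic C pitfall: a u32 * u32 product is computed in 32 bits, so dir_entries * entry_length can silently wrap for a large NVRAM directory before it is ever stored. The kernel helper widens one operand first; a runnable sketch with a local stand-in of the same semantics:

#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;	/* widen before multiplying */
}

int main(void)
{
	uint32_t dir_entries = 0x10000, entry_length = 0x10000;

	printf("wrapped: %u\n", dir_entries * entry_length);	/* prints 0 */
	printf("full:    %llu\n",
	       (unsigned long long)mul_u32_u32(dir_entries, entry_length));
	return 0;
}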
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index a3a3978a4d1c..e46689128e32 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -230,7 +230,7 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
ptp_info);
struct bnxt *bp = ptp->bp;
- if (BNXT_PTP_USE_RTC(bp))
+ if (!BNXT_MH(bp))
return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
spin_lock_bh(&ptp->ptp_lock);
@@ -861,9 +861,15 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
memset(&ptp->cc, 0, sizeof(ptp->cc));
ptp->cc.read = bnxt_cc_read;
ptp->cc.mask = CYCLECOUNTER_MASK(48);
- ptp->cc.shift = BNXT_CYCLES_SHIFT;
- ptp->cc.mult = clocksource_khz2mult(BNXT_DEVCLK_FREQ, ptp->cc.shift);
- ptp->cmult = ptp->cc.mult;
+ if (BNXT_MH(bp)) {
+ /* Use timecounter based non-real time mode */
+ ptp->cc.shift = BNXT_CYCLES_SHIFT;
+ ptp->cc.mult = clocksource_khz2mult(BNXT_DEVCLK_FREQ, ptp->cc.shift);
+ ptp->cmult = ptp->cc.mult;
+ } else {
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+ }
ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
}
if (init_tc)
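
The math behind these mult/shift choices: the timecounter layer converts raw cycles to nanoseconds as ns = (cycles * cc.mult) >> cc.shift. In multi-host (BNXT_MH) mode the driver keeps the khz2mult scaling for the non-real-time path; otherwise mult = 1 and shift = 0 make the conversion an identity, leaving raw device units. A one-function sketch of that formula (values illustrative, simplified from the kernel's cyclecounter handling):

#include <stdint.h>
#include <stdio.h>

static uint64_t cyc2ns_model(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;	/* timecounter core formula */
}

int main(void)
{
	/* mult = 1, shift = 0: identity, i.e. raw device cycles */
	printf("%llu\n", (unsigned long long)cyc2ns_model(1000, 1, 0));
	return 0;
}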
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f02facb60fd1..3a6763c5e8b3 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#include <asm/sibyte/board.h>
#include <asm/sibyte/sb1250.h>
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#include <asm/sibyte/sb1250_mac.h>
#include <asm/sibyte/sb1250_dma.h>
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#define UNIT_INT(n) (K_INT_MAC_0 + (n))
@@ -1527,7 +1527,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
* Turn on the rest of the bits in the enable register
*/
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
__raw_writeq(M_MAC_RXDMA_EN0 |
M_MAC_TXDMA_EN0, s->sbm_macenable);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 14dfec4db8f9..c1fc91c97cee 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -692,6 +692,8 @@
#define GEM_CLK_DIV48 3
#define GEM_CLK_DIV64 4
#define GEM_CLK_DIV96 5
+#define GEM_CLK_DIV128 6
+#define GEM_CLK_DIV224 7
/* Constants for MAN register */
#define MACB_MAN_C22_SOF 1
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 66e30561569e..f77bd1223c8f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -94,8 +94,7 @@ struct sifive_fu540_macb_mgmt {
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
-#define MACB_HALT_TIMEOUT 1230
-
+#define MACB_HALT_TIMEOUT 14000
#define MACB_PM_TIMEOUT 100 /* ms */
#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
@@ -1071,6 +1070,7 @@ static void macb_tx_error_task(struct work_struct *work)
{
struct macb_queue *queue = container_of(work, struct macb_queue,
tx_error_task);
+ bool halt_timeout = false;
struct macb *bp = queue->bp;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
@@ -1098,9 +1098,11 @@ static void macb_tx_error_task(struct work_struct *work)
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
- if (macb_halt_tx(bp))
- /* Just complain for now, reinitializing TX path can be good */
+ if (macb_halt_tx(bp)) {
netdev_err(bp->dev, "BUG: halt tx timed out\n");
+ macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
+ halt_timeout = true;
+ }
/* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
@@ -1171,6 +1173,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_writel(bp, TSR, macb_readl(bp, TSR));
queue_writel(queue, IER, MACB_TX_INT_FLAGS);
+ if (halt_timeout)
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+
/* Now we are ready to start transmission again */
netif_tx_start_all_queues(bp->dev);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
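
The new halt_timeout path forces the transmitter off so TBQP can be rewritten safely even though the halt never completed, then re-enables it once the error task has cleaned the ring. A toy register model of that read-modify-write sequence (bit position illustrative, not taken from the datasheet):

#include <stdio.h>

#define MACB_BIT_TE	(1u << 3)	/* transmit enable, illustrative */

static unsigned int ncr;

int main(void)
{
	ncr |= MACB_BIT_TE;	/* transmitter running */
	ncr &= ~MACB_BIT_TE;	/* halt timed out: force TX off */
	/* ... rewrite TBQP, free the stuck descriptors ... */
	ncr |= MACB_BIT_TE;	/* restore transmitter before TSTART */
	printf("NCR = 0x%x\n", ncr);
	return 0;
}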
@@ -2641,8 +2646,12 @@ static u32 gem_mdc_clk_div(struct macb *bp)
config = GEM_BF(CLK, GEM_CLK_DIV48);
else if (pclk_hz <= 160000000)
config = GEM_BF(CLK, GEM_CLK_DIV64);
- else
+ else if (pclk_hz <= 240000000)
config = GEM_BF(CLK, GEM_CLK_DIV96);
+ else if (pclk_hz <= 320000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV128);
+ else
+ config = GEM_BF(CLK, GEM_CLK_DIV224);
return config;
}
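
The extended divider table keeps the MDIO management clock at or under the conventional 2.5 MHz ceiling as pclk grows: 240 MHz / 96, 320 MHz / 128, and anything faster divided by 224 all land at or below 2.5 MHz. A standalone sketch of the selection, mirroring the thresholds above:

#include <stdio.h>

static unsigned int gem_div_for_pclk(unsigned long pclk_hz)
{
	if (pclk_hz <= 240000000UL)
		return 96;	/* GEM_CLK_DIV96 */
	if (pclk_hz <= 320000000UL)
		return 128;	/* GEM_CLK_DIV128 */
	return 224;		/* GEM_CLK_DIV224 */
}

int main(void)
{
	unsigned long pclk = 300000000UL;

	printf("MDC = %lu Hz\n", pclk / gem_div_for_pclk(pclk));
	return 0;
}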
@@ -4844,7 +4853,7 @@ static const struct macb_config mpfs_config = {
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
- MACB_CAPS_MIIONRGMII,
+ MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4853,7 +4862,8 @@ static const struct macb_config sama7g5_gem_config = {
static const struct macb_config sama7g5_emac_config = {
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
- MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index fd7c80edb6e8..9bd1d2d7027d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1129,7 +1129,6 @@ static void octeon_destroy_resources(struct octeon_device *oct)
fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
- pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index ac196883f07e..e2921aec3da0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -577,7 +577,6 @@ static void octeon_destroy_resources(struct octeon_device *oct)
fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
- pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 8e59c2825533..32f854c0cd79 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -40,15 +40,6 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
-static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
-{
- struct octeon_instr_queue *iq =
- (struct octeon_instr_queue *)oct->instr_queue[iq_no];
- return iq->iqcmd_64B;
-}
-
-#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
-
/* Define this to return the request status comaptible to old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 62dfbdd33365..efa7f401529e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -166,11 +166,6 @@ static u8 flit_desc_map[] = {
#endif
};
-static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
-{
- return container_of(q, struct sge_qset, fl[qidx]);
-}
-
static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
return container_of(q, struct sge_qset, rspq);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7db2403c4c9c..f0bc7396ce2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -51,7 +51,6 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -6687,7 +6686,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
adap_idx++;
@@ -7092,7 +7090,6 @@ fw_attach_fail:
out_unmap_bar0:
iounmap(regs);
out_disable_device:
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
@@ -7171,7 +7168,6 @@ static void remove_one(struct pci_dev *pdev)
}
#endif
iounmap(adapter->regs);
- pci_disable_pcie_error_reporting(pdev);
if ((adapter->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
adapter->flags &= ~CXGB4_DEV_ENABLED;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 63b2bd084130..9ba0864592e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -3258,7 +3258,6 @@ err_free_adapter:
err_release_regions:
pci_release_regions(pdev);
- pci_clear_master(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -3338,7 +3337,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
* Disable the device and release its PCI resources.
*/
pci_disable_device(pdev);
- pci_clear_master(pdev);
pci_release_regions(pdev);
}
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 46e3a05e9582..c2c5c589a5e3 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -558,7 +558,6 @@ err_unmap:
err_release_regions:
pci_release_regions(dev);
err_disable_dev:
- pci_clear_master(dev);
pci_disable_device(dev);
return err;
@@ -577,7 +576,6 @@ static void ec_bhf_remove(struct pci_dev *dev)
free_netdev(net_dev);
pci_release_regions(dev);
- pci_clear_master(dev);
pci_disable_device(dev);
}
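
As with the liquidio and cxgb4vf hunks above, the explicit pci_clear_master() goes away because pci_disable_device() already clears the bus-master bit in the command register, so the extra call was redundant on these paths (my reading of the PCI core behavior, not stated in the diff). A toy model of that redundancy:

/* Toy model, not kernel code: disabling the device already drops the
 * bus-master bit, so a preceding pci_clear_master() changes nothing. */
#include <stdio.h>

#define PCI_COMMAND_MASTER 0x4

static unsigned short command = 0x7;	/* IO | MEM | MASTER */

static void pci_disable_device_model(void)
{
	command &= ~PCI_COMMAND_MASTER;	/* clears bus mastering too */
}

int main(void)
{
	pci_disable_device_model();
	printf("command = 0x%x\n", command);
	return 0;
}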
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 08ec84cd21c0..61adcebeef01 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -135,7 +135,8 @@ static int be_mcc_notify(struct be_adapter *adapter)
/* To check if valid bit is set, check the entire word as we don't know
* the endianness of the data (old entry is host endian while a new entry is
- * little endian) */
+ * little endian)
+ */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
u32 flags;
@@ -248,7 +249,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
u8 opcode = 0, subsystem = 0;
/* Just swap the status to host endian; mcc tag is opaquely copied
- * from mcc_wrb */
+ * from mcc_wrb
+ */
be_dws_le_to_cpu(compl, 4);
base_status = base_status(compl->status);
@@ -657,8 +659,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
return 0;
}
-/*
- * Insert the mailbox address into the doorbell in two steps
+/* Insert the mailbox address into the doorbell in two steps
* Polls on the mbox doorbell till a command completion (or a timeout) occurs
*/
static int be_mbox_notify_wait(struct be_adapter *adapter)
@@ -802,7 +803,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
req_hdr->version = 0;
- fill_wrb_tags(wrb, (ulong) req_hdr);
+ fill_wrb_tags(wrb, (ulong)req_hdr);
wrb->payload_length = cmd_len;
if (mem) {
wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -832,8 +833,8 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
- struct be_mcc_wrb *wrb
- = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+ struct be_mcc_wrb *wrb = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+
memset(wrb, 0, sizeof(*wrb));
return wrb;
}
@@ -896,7 +897,7 @@ static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
memcpy(dest_wrb, wrb, sizeof(*wrb));
if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
- fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
+ fill_wrb_tags(dest_wrb, (ulong)embedded_payload(wrb));
return dest_wrb;
}
@@ -1114,7 +1115,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
- if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
@@ -1803,7 +1804,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len;
- get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
get_fat_cmd.size,
&get_fat_cmd.dma, GFP_ATOMIC);
@@ -1813,7 +1814,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
mutex_lock(&adapter->mcc_lock);
while (total_size) {
- buf_size = min(total_size, (u32)60*1024);
+ buf_size = min(total_size, (u32)60 * 1024);
total_size -= buf_size;
wrb = wrb_from_mccq(adapter);
@@ -3362,7 +3363,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
for (i = 0; i < byte_cnt; i++) {
- req->snd_buff[i] = (u8)(pattern >> (j*8));
+ req->snd_buff[i] = (u8)(pattern >> (j * 8));
j++;
if (j > 7)
j = 0;
@@ -3846,7 +3847,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req->hdr.domain = domain;
req->mac_count = mac_count;
if (mac_count)
- memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+ memcpy(req->mac, mac_array, ETH_ALEN * mac_count);
status = be_mcc_notify_wait(adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 46fe3d74e2e9..aed1b622f51f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -16,7 +16,6 @@
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
-#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
@@ -5726,8 +5725,6 @@ static void be_remove(struct pci_dev *pdev)
be_unmap_pci_bars(adapter);
be_drv_cleanup(adapter);
- pci_disable_pcie_error_reporting(pdev);
-
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -5845,10 +5842,6 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
goto free_netdev;
}
- status = pci_enable_pcie_error_reporting(pdev);
- if (!status)
- dev_info(&pdev->dev, "PCIe error reporting enabled\n");
-
status = be_map_pci_bars(adapter);
if (status)
goto free_netdev;
@@ -5893,7 +5886,6 @@ drv_cleanup:
unmap_bars:
be_unmap_pci_bars(adapter);
free_netdev:
- pci_disable_pcie_error_reporting(pdev);
free_netdev(netdev);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 6982aaa928b5..ed1b6102cfeb 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -246,7 +246,6 @@ static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
phy_stop(adapter->netdev->phydev);
phy_disconnect(adapter->netdev->phydev);
- adapter->netdev->phydev = NULL;
}
static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index c886f33f8c6f..b1871e6c4006 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -159,7 +159,8 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
struct dpmac_link_state *dpmac_state = &mac->state;
int err;
- if (state->an_enabled)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ state->advertising))
dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
else
dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
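
This follows phylink's retirement of the separate state->an_enabled flag: autoneg is now derived from the Autoneg bit inside the advertising linkmode bitmap, as the hunk does with linkmode_test_bit(). A standalone model of that bitmap test (the bit number mirrors ETHTOOL_LINK_MODE_Autoneg_BIT; treat it as an assumption here):

#include <stdio.h>

#define AUTONEG_BIT	6	/* models ETHTOOL_LINK_MODE_Autoneg_BIT */
#define LONG_BITS	(8 * sizeof(unsigned long))

static int linkmode_test_bit_model(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / LONG_BITS] >> (nr % LONG_BITS)) & 1UL;
}

int main(void)
{
	unsigned long advertising[2] = { 1UL << AUTONEG_BIT, 0 };

	printf("autoneg: %d\n",
	       linkmode_test_bit_model(AUTONEG_BIT, advertising));
	return 0;
}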
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
index fb5120d90f26..a7fbd4cd560a 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_dev.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -747,8 +746,6 @@ void fun_dev_disable(struct fun_dev *fdev)
bitmap_free(fdev->irq_map);
pci_free_irq_vectors(pdev);
- pci_clear_master(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
fun_unmap_bars(fdev);
@@ -781,8 +778,6 @@ int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
goto unmap;
}
- pci_enable_pcie_error_reporting(pdev);
-
rc = sanitize_dev(fdev);
if (rc)
goto disable_dev;
@@ -825,12 +820,10 @@ int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
disable_admin:
fun_disable_admin_queue(fdev);
free_irq_mgr:
- pci_clear_master(pdev);
bitmap_free(fdev->irq_map);
free_irqs:
pci_free_irq_vectors(pdev);
disable_dev:
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
unmap:
fun_unmap_bars(fdev);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 64eb0442c82f..e214b51d3c8b 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -47,6 +47,10 @@
#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_XDP_ACTIONS 5
+
+#define GVE_TX_MAX_HEADER_SIZE 182
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -230,7 +234,10 @@ struct gve_rx_ring {
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
-
+ u64 xdp_tx_errors;
+ u64 xdp_redirect_errors;
+ u64 xdp_alloc_fails;
+ u64 xdp_actions[GVE_XDP_ACTIONS];
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
@@ -238,6 +245,12 @@ struct gve_rx_ring {
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
+
+ /* XDP stuff */
+ struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xsk_rxq;
+ struct xsk_buff_pool *xsk_pool;
+ struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
/* A TX desc ring entry */
@@ -258,7 +271,14 @@ struct gve_tx_iovec {
* ring entry but only used for a pkt_desc not a seg_desc
*/
struct gve_tx_buffer_state {
- struct sk_buff *skb; /* skb for this pkt */
+ union {
+ struct sk_buff *skb; /* skb for this pkt */
+ struct xdp_frame *xdp_frame; /* xdp_frame */
+ };
+ struct {
+ u16 size; /* size of xmitted xdp pkt */
+ u8 is_xsk; /* xsk buff */
+ } xdp;
union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
struct {
@@ -373,6 +393,8 @@ struct gve_tx_ring {
struct {
/* Spinlock for when cleanup in progress */
spinlock_t clean_lock;
+ /* Spinlock for XDP tx traffic */
+ spinlock_t xdp_lock;
};
/* DQO fields. */
@@ -450,6 +472,12 @@ struct gve_tx_ring {
dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
+ struct xsk_buff_pool *xsk_pool;
+ u32 xdp_xsk_wakeup;
+ u32 xdp_xsk_done;
+ u64 xdp_xsk_sent;
+ u64 xdp_xmit;
+ u64 xdp_xmit_errors;
} ____cacheline_aligned;
/* Wraps the info for one irq including the napi struct and the queues
@@ -526,9 +554,11 @@ struct gve_priv {
u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
+ struct bpf_prog *xdp_prog; /* XDP BPF program */
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
+ u16 num_xdp_queues;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
struct gve_qpl_config qpl_cfg; /* map used QPL ids */
@@ -785,7 +815,17 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0;
- return priv->tx_cfg.num_queues;
+ return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+}
+
+/* Returns the number of XDP tx queue page lists
+ */
+static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
+{
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ return 0;
+
+ return priv->num_xdp_queues;
}
/* Returns the number of rx queue page lists
@@ -798,16 +838,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
return priv->rx_cfg.num_queues;
}
+static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
+{
+ return tx_qid;
+}
+
+static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
+{
+ return priv->tx_cfg.max_queues + rx_qid;
+}
+
+static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
+{
+ return gve_tx_qpl_id(priv, 0);
+}
+
+static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+{
+ return gve_rx_qpl_id(priv, 0);
+}
+
/* Returns a pointer to the next available tx qpl in the list of qpls
*/
static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
{
- int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
- priv->qpl_cfg.qpl_map_size);
+ int id = gve_tx_qpl_id(priv, tx_qid);
- /* we are out of tx qpls */
- if (id >= gve_num_tx_qpls(priv))
+ /* QPL already in use */
+ if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -817,14 +876,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
/* Returns a pointer to the next available rx qpl in the list of qpls
*/
static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
{
- int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
- priv->qpl_cfg.qpl_map_size,
- gve_num_tx_qpls(priv));
+ int id = gve_rx_qpl_id(priv, rx_qid);
- /* we are out of rx qpls */
- if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
+ /* QPL already in use */
+ if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -843,7 +900,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id)
{
- if (id < gve_num_tx_qpls(priv))
+ if (id < gve_rx_start_qpl_id(priv))
return DMA_TO_DEVICE;
else
return DMA_FROM_DEVICE;
@@ -855,6 +912,21 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT;
}
+static inline u32 gve_num_tx_queues(struct gve_priv *priv)
+{
+ return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+}
+
+static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
+{
+ return priv->tx_cfg.num_queues + queue_id;
+}
+
+static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
+{
+ return gve_xdp_tx_queue_id(priv, 0);
+}
+
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
@@ -863,9 +935,15 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
+int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void *data, int len, void *frame_p);
+void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv);
-void gve_tx_free_rings_gqi(struct gve_priv *priv);
+bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
+void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
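
The helpers added above replace bitmap searching with a fixed layout: TX queue page lists occupy ids [0, tx_cfg.max_queues) and RX QPLs start immediately after, so each queue maps to exactly one QPL id and the bitmap only flags double assignment. A toy rendering of that arithmetic (MAX_TX is illustrative):

#include <stdio.h>

#define MAX_TX 4	/* stands in for priv->tx_cfg.max_queues */

static int tx_qpl_id(int tx_qid) { return tx_qid; }
static int rx_qpl_id(int rx_qid) { return MAX_TX + rx_qid; }

int main(void)
{
	/* TX queue 2 -> QPL 2; RX queue 1 -> QPL 4 + 1 = 5 */
	printf("tx2 -> qpl %d, rx1 -> qpl %d\n",
	       tx_qpl_id(2), rx_qpl_id(1));
	return 0;
}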
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 60061288ad9d..252974202a3f 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -516,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
return gve_adminq_issue_cmd(priv, &cmd);
}
-int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
int err;
int i;
- for (i = 0; i < num_queues; i++) {
+ for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
return err;
@@ -604,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
return 0;
}
-int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
int err;
int i;
- for (i = 0; i < num_queues; i++) {
+ for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index cf29662e6ad1..f894beb3deaf 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -410,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
-int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
-int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 5f81470843b4..cfd4b8d284d1 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -34,6 +34,11 @@ static u32 gve_get_msglevel(struct net_device *netdev)
return priv->msg_enable;
}
+/* For the following stats column string names, make sure the order
+ * matches how it is filled in the code. For xdp_aborted, xdp_drop,
+ * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
+ * as declared in enum xdp_action inside file uapi/linux/bpf.h .
+ */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
"rx_dropped", "tx_dropped", "tx_timeouts",
@@ -49,12 +54,16 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
+ "rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
+ "rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
+ "rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]",
+ "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
+ "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -81,8 +90,10 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
char *s = (char *)data;
+ int num_tx_queues;
int i, j;
+ num_tx_queues = gve_num_tx_queues(priv);
switch (stringset) {
case ETH_SS_STATS:
memcpy(s, *gve_gstrings_main_stats,
@@ -97,7 +108,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = 0; i < num_tx_queues; i++) {
for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
snprintf(s, ETH_GSTRING_LEN,
gve_gstrings_tx_stats[j], i);
@@ -124,12 +135,14 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
static int gve_get_sset_count(struct net_device *netdev, int sset)
{
struct gve_priv *priv = netdev_priv(netdev);
+ int num_tx_queues;
+ num_tx_queues = gve_num_tx_queues(priv);
switch (sset) {
case ETH_SS_STATS:
return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
(priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
- (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
+ (num_tx_queues * NUM_GVE_TX_CNTS);
case ETH_SS_PRIV_FLAGS:
return GVE_PRIV_FLAGS_STR_LEN;
default:
@@ -153,18 +166,20 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_priv *priv;
bool skip_nic_stats;
unsigned int start;
+ int num_tx_queues;
int ring;
int i, j;
ASSERT_RTNL();
priv = netdev_priv(netdev);
+ num_tx_queues = gve_num_tx_queues(priv);
report_stats = priv->stats_report->stats;
rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx)
return;
- tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
+ tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx);
@@ -195,7 +210,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
}
for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
- ring < priv->tx_cfg.num_queues; ring++) {
+ ring < num_tx_queues; ring++) {
if (priv->tx) {
do {
start =
@@ -232,7 +247,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
i = GVE_MAIN_STATS_LEN;
/* For rx cross-reporting stats, start from nic rx stats in report */
- base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
base_stats_idx;
@@ -283,14 +298,26 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (skip_nic_stats) {
/* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM;
- continue;
- }
- for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
- u64 value =
- be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);
+ } else {
+ stats_idx = rx_qid_to_stats_idx[ring];
+ for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[stats_idx + j].value);
- data[i++] = value;
+ data[i++] = value;
+ }
}
+ /* XDP rx counters */
+ do {
+ start = u64_stats_fetch_begin(&priv->rx[ring].statss);
+ for (j = 0; j < GVE_XDP_ACTIONS; j++)
+ data[i + j] = rx->xdp_actions[j];
+ data[i + j++] = rx->xdp_tx_errors;
+ data[i + j++] = rx->xdp_redirect_errors;
+ data[i + j++] = rx->xdp_alloc_fails;
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ start));
+ i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
}
} else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
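The reads above use the u64_stats begin/retry pair so a 32-bit reader never observes a torn 64-bit counter while the datapath updates it. A minimal userspace sketch of the underlying seqcount-style reader (the kernel helpers hide per-arch details; this only shows the retry shape, with made-up stats fields):

#include <stdatomic.h>
#include <stdio.h>

struct stats {
    atomic_uint seq;
    unsigned long long packets;
    unsigned long long bytes;
};

static unsigned int read_begin(struct stats *s)
{
    unsigned int seq;

    /* spin while a writer holds the sequence at an odd value */
    do {
        seq = atomic_load_explicit(&s->seq, memory_order_acquire);
    } while (seq & 1);
    return seq;
}

static int read_retry(struct stats *s, unsigned int seq)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&s->seq, memory_order_relaxed) != seq;
}

int main(void)
{
    struct stats s = { .packets = 42, .bytes = 64000 };
    unsigned long long p, b;
    unsigned int seq;

    do {
        seq = read_begin(&s);
        p = s.packets;  /* snapshot both counters consistently */
        b = s.bytes;
    } while (read_retry(&s, seq));
    printf("packets=%llu bytes=%llu\n", p, b);
    return 0;
}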
@@ -298,7 +325,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
- max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
@@ -316,7 +343,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
/* walk TX rings */
if (priv->tx) {
- for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+ for (ring = 0; ring < num_tx_queues; ring++) {
struct gve_tx_ring *tx = &priv->tx[ring];
if (gve_is_gqi(priv)) {
@@ -346,16 +373,28 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (skip_nic_stats) {
/* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
- continue;
- }
- for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
- u64 value =
- be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
- data[i++] = value;
+ } else {
+ stats_idx = tx_qid_to_stats_idx[ring];
+ for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[stats_idx + j].value);
+ data[i++] = value;
+ }
}
+ /* XDP xsk counters */
+ data[i++] = tx->xdp_xsk_wakeup;
+ data[i++] = tx->xdp_xsk_done;
+ do {
+ start = u64_stats_fetch_begin(&priv->tx[ring].statss);
+ data[i] = tx->xdp_xsk_sent;
+ data[i + 1] = tx->xdp_xmit;
+ data[i + 2] = tx->xdp_xmit_errors;
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ start));
+ i += 3; /* XDP tx counters */
}
} else {
- i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
+ i += num_tx_queues * NUM_GVE_TX_CNTS;
}
kfree(rx_qid_to_stats_idx);
@@ -412,6 +451,12 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
+ if (priv->num_xdp_queues &&
+ (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
+ dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
+ return -EINVAL;
+ }
+
if (!netif_carrier_ok(netdev)) {
priv->tx_cfg.num_queues = new_tx;
priv->rx_cfg.num_queues = new_rx;
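The check above keeps a channel change compatible with the one-XDP-tx-queue-per-rx-queue scheme: each rx queue needs a paired XDP tx queue carved out of the same tx queue budget. A standalone restatement of the gate, with illustrative numbers:

#include <stdio.h>

static int xdp_channels_ok(int new_tx, int new_rx, int max_tx)
{
    /* rx must equal tx, and doubling tx must still fit the device limit */
    return new_tx == new_rx && 2 * new_tx <= max_tx;
}

int main(void)
{
    printf("%d\n", xdp_channels_ok(8, 8, 16));  /* 1: fits exactly */
    printf("%d\n", xdp_channels_ok(8, 8, 15));  /* 0: no room to double */
    printf("%d\n", xdp_channels_ok(8, 4, 32));  /* 0: rx != tx */
    return 0;
}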
@@ -502,7 +547,9 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct gve_priv *priv = netdev_priv(netdev);
u64 ori_flags, new_flags;
+ int num_tx_queues;
+ num_tx_queues = gve_num_tx_queues(priv);
ori_flags = READ_ONCE(priv->ethtool_flags);
new_flags = ori_flags;
@@ -522,7 +569,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
/* delete report stats timer. */
if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
- priv->tx_cfg.num_queues;
+ num_tx_queues;
int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
priv->rx_cfg.num_queues;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 07111c241e0e..57ce74315eba 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -4,8 +4,10 @@
* Copyright (C) 2015-2021 Google, Inc.
*/
+#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
+#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -15,6 +17,7 @@
#include <linux/utsname.h>
#include <linux/version.h>
#include <net/sch_generic.h>
+#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
@@ -90,8 +93,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
struct gve_priv *priv = netdev_priv(dev);
unsigned int start;
u64 packets, bytes;
+ int num_tx_queues;
int ring;
+ num_tx_queues = gve_num_tx_queues(priv);
if (priv->rx) {
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
@@ -106,7 +111,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
}
}
if (priv->tx) {
- for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+ for (ring = 0; ring < num_tx_queues; ring++) {
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
@@ -180,7 +185,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
int tx_stats_num, rx_stats_num;
tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
- priv->tx_cfg.num_queues;
+ gve_num_tx_queues(priv);
rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
priv->rx_cfg.num_queues;
priv->stats_report_len = struct_size(priv->stats_report, stats,
@@ -245,8 +250,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
block = container_of(napi, struct gve_notify_block, napi);
priv = block->priv;
- if (block->tx)
- reschedule |= gve_tx_poll(block, budget);
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll(block, budget);
+ else
+ reschedule |= gve_xdp_poll(block, budget);
+ }
+
if (block->rx) {
work_done = gve_rx_poll(block, budget);
reschedule |= work_done == budget;
@@ -580,13 +590,36 @@ static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
netif_napi_del(&block->napi);
}
+static int gve_register_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int err;
+ int i;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ }
+ return 0;
+}
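Registration now walks explicit id ranges instead of a flat [0, num_qpls) loop. A sketch of the QPL id layout these loops imply (the gve_*_qpl_id() helpers live in gve.h, whose diff is not shown here; the mapping below is inferred from their usage in this patch, with made-up sizes):

#include <stdio.h>

int main(void)
{
    int tx_max = 16, rx_max = 16;  /* tx_cfg/rx_cfg max_queues */
    int num_tx = 8, num_xdp = 8;   /* netdev tx queues, XDP tx queues */

    int tx_start_qpl = 0;          /* gve_tx_start_qpl_id() */
    int rx_start_qpl = tx_max;     /* gve_rx_start_qpl_id() */
    int xdp_start_queue = num_tx;  /* gve_xdp_tx_start_queue_id() */
    int xdp_start_qpl = tx_start_qpl + xdp_start_queue;

    printf("tx QPLs:  [%d, %d)\n", tx_start_qpl, tx_start_qpl + num_tx);
    printf("xdp QPLs: [%d, %d)\n", xdp_start_qpl, xdp_start_qpl + num_xdp);
    printf("rx QPLs:  [%d, %d)\n", rx_start_qpl, rx_start_qpl + rx_max);
    return 0;
}

With that layout the XDP QPLs slot into the reserved upper half of the tx id space, so the rx range never moves when XDP queues come and go.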
+
static int gve_register_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int start_id;
int err;
int i;
- for (i = 0; i < num_qpls; i++) {
+ start_id = gve_tx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -598,16 +631,63 @@ static int gve_register_qpls(struct gve_priv *priv)
return err;
}
}
+
+ start_id = gve_rx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int gve_unregister_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int err;
+ int i;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
+ }
return 0;
}
static int gve_unregister_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int start_id;
int err;
int i;
- for (i = 0; i < num_qpls; i++) {
+ start_id = gve_tx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
+ }
+
+ start_id = gve_rx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
/* This failure will trigger a reset - no need to clean up */
if (err) {
@@ -620,22 +700,44 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return 0;
}
+static int gve_create_xdp_rings(struct gve_priv *priv)
+{
+ int err;
+
+ err = gve_adminq_create_tx_queues(priv,
+ gve_xdp_tx_start_queue_id(priv),
+ priv->num_xdp_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
+ priv->num_xdp_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
+ priv->num_xdp_queues);
+
+ return 0;
+}
+
static int gve_create_rings(struct gve_priv *priv)
{
+ int num_tx_queues = gve_num_tx_queues(priv);
int err;
int i;
- err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
+ err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
if (err) {
netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
- priv->tx_cfg.num_queues);
+ num_tx_queues);
/* This failure will trigger a reset - no need to clean
* up
*/
return err;
}
netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
- priv->tx_cfg.num_queues);
+ num_tx_queues);
err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
if (err) {
@@ -668,6 +770,23 @@ static int gve_create_rings(struct gve_priv *priv)
return 0;
}
+static void add_napi_init_xdp_sync_stats(struct gve_priv *priv,
+ int (*napi_poll)(struct napi_struct *napi,
+ int budget))
+{
+ int start_id = gve_xdp_tx_start_queue_id(priv);
+ int i;
+
+ /* Add XDP tx napi & init sync stats */
+ for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+
+ u64_stats_init(&priv->tx[i].statss);
+ priv->tx[i].ntfy_id = ntfy_idx;
+ gve_add_napi(priv, ntfy_idx, napi_poll);
+ }
+}
+
static void add_napi_init_sync_stats(struct gve_priv *priv,
int (*napi_poll)(struct napi_struct *napi,
int budget))
@@ -675,7 +794,7 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
int i;
/* Add tx napi & init sync stats*/
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = 0; i < gve_num_tx_queues(priv); i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -692,34 +811,51 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
}
}
-static void gve_tx_free_rings(struct gve_priv *priv)
+static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
{
if (gve_is_gqi(priv)) {
- gve_tx_free_rings_gqi(priv);
+ gve_tx_free_rings_gqi(priv, start_id, num_rings);
} else {
gve_tx_free_rings_dqo(priv);
}
}
+static int gve_alloc_xdp_rings(struct gve_priv *priv)
+{
+ int start_id;
+ int err = 0;
+
+ if (!priv->num_xdp_queues)
+ return 0;
+
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues);
+ if (err)
+ return err;
+ add_napi_init_xdp_sync_stats(priv, gve_napi_poll);
+
+ return 0;
+}
+
static int gve_alloc_rings(struct gve_priv *priv)
{
int err;
/* Setup tx rings */
- priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
+ priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
if (gve_is_gqi(priv))
- err = gve_tx_alloc_rings(priv);
+ err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
else
err = gve_tx_alloc_rings_dqo(priv);
if (err)
goto free_tx;
/* Setup rx rings */
- priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
+ priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
GFP_KERNEL);
if (!priv->rx) {
err = -ENOMEM;
@@ -744,18 +880,39 @@ free_rx:
kvfree(priv->rx);
priv->rx = NULL;
free_tx_queue:
- gve_tx_free_rings(priv);
+ gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
free_tx:
kvfree(priv->tx);
priv->tx = NULL;
return err;
}
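Sizing the ring arrays by max_queues rather than num_queues is what lets gve_add_xdp_queues() later bring up extra tx rings in place, without reallocating or moving live rings. A toy illustration with assumed sizes:

#include <stdio.h>

int main(void)
{
    int tx_max = 16;  /* tx_cfg.max_queues: array is sized for this */
    int tx_now = 8;   /* tx_cfg.num_queues: rings live today */

    /* rings [0, tx_now) carry netdev traffic; rings [tx_now, 2 * tx_now)
     * can be brought up later for XDP without touching the array.
     */
    printf("allocated %d slots, %d live, %d reserved for XDP\n",
           tx_max, tx_now, tx_max - tx_now);
    return 0;
}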
+static int gve_destroy_xdp_rings(struct gve_priv *priv)
+{
+ int start_id;
+ int err;
+
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ err = gve_adminq_destroy_tx_queues(priv,
+ start_id,
+ priv->num_xdp_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy XDP queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
+ }
+ netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
+
+ return 0;
+}
+
static int gve_destroy_rings(struct gve_priv *priv)
{
+ int num_tx_queues = gve_num_tx_queues(priv);
int err;
- err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
+ err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to destroy tx queues\n");
@@ -782,17 +939,33 @@ static void gve_rx_free_rings(struct gve_priv *priv)
gve_rx_free_rings_dqo(priv);
}
+static void gve_free_xdp_rings(struct gve_priv *priv)
+{
+ int ntfy_idx, start_id;
+ int i;
+
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ if (priv->tx) {
+ for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+ gve_remove_napi(priv, ntfy_idx);
+ }
+ gve_tx_free_rings(priv, start_id, priv->num_xdp_queues);
+ }
+}
+
static void gve_free_rings(struct gve_priv *priv)
{
+ int num_tx_queues = gve_num_tx_queues(priv);
int ntfy_idx;
int i;
if (priv->tx) {
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = 0; i < num_tx_queues; i++) {
ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
gve_remove_napi(priv, ntfy_idx);
}
- gve_tx_free_rings(priv);
+ gve_tx_free_rings(priv, 0, num_tx_queues);
kvfree(priv->tx);
priv->tx = NULL;
}
@@ -889,40 +1062,68 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
kvfree(qpl->page_buses);
+ qpl->page_buses = NULL;
free_pages:
kvfree(qpl->pages);
+ qpl->pages = NULL;
priv->num_registered_pages -= qpl->num_entries;
}
+static int gve_alloc_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int i, j;
+ int err;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
+ err = gve_alloc_queue_page_list(priv, i,
+ priv->tx_pages_per_qpl);
+ if (err)
+ goto free_qpls;
+ }
+
+ return 0;
+
+free_qpls:
+ for (j = start_id; j <= i; j++)
+ gve_free_queue_page_list(priv, j);
+ return err;
+}
+
static int gve_alloc_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int start_id;
int i, j;
int err;
- if (num_qpls == 0)
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0;
- priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
+ priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
if (!priv->qpls)
return -ENOMEM;
- for (i = 0; i < gve_num_tx_qpls(priv); i++) {
+ start_id = gve_tx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
priv->tx_pages_per_qpl);
if (err)
goto free_qpls;
}
- for (; i < num_qpls; i++) {
+
+ start_id = gve_rx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
priv->rx_data_slot_cnt);
if (err)
goto free_qpls;
}
- priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
+ priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
+ priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
sizeof(unsigned long), GFP_KERNEL);
if (!priv->qpl_cfg.qpl_id_map) {
err = -ENOMEM;
@@ -935,23 +1136,36 @@ free_qpls:
for (j = 0; j <= i; j++)
gve_free_queue_page_list(priv, j);
kvfree(priv->qpls);
+ priv->qpls = NULL;
return err;
}
+static void gve_free_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int i;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++)
+ gve_free_queue_page_list(priv, i);
+}
+
static void gve_free_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
int i;
- if (num_qpls == 0)
+ if (!priv->qpls)
return;
kvfree(priv->qpl_cfg.qpl_id_map);
+ priv->qpl_cfg.qpl_id_map = NULL;
- for (i = 0; i < num_qpls; i++)
+ for (i = 0; i < max_queues; i++)
gve_free_queue_page_list(priv, i);
kvfree(priv->qpls);
+ priv->qpls = NULL;
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -969,11 +1183,109 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);
+static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
+{
+ struct napi_struct *napi;
+ struct gve_rx_ring *rx;
+ int err = 0;
+ int i, j;
+ u32 tx_qid;
+
+ if (!priv->num_xdp_queues)
+ return 0;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ rx = &priv->rx[i];
+ napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+
+ err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
+ napi->napi_id);
+ if (err)
+ goto err;
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err)
+ goto err;
+ rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
+ if (rx->xsk_pool) {
+ err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
+ napi->napi_id);
+ if (err)
+ goto err;
+ err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err)
+ goto err;
+ xsk_pool_set_rxq_info(rx->xsk_pool,
+ &rx->xsk_rxq);
+ }
+ }
+
+ for (i = 0; i < priv->num_xdp_queues; i++) {
+ tx_qid = gve_xdp_tx_queue_id(priv, i);
+ priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
+ }
+ return 0;
+
+err:
+ for (j = i; j >= 0; j--) {
+ rx = &priv->rx[j];
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+ if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
+ xdp_rxq_info_unreg(&rx->xsk_rxq);
+ }
+ return err;
+}
+
+static void gve_unreg_xdp_info(struct gve_priv *priv)
+{
+ int i, tx_qid;
+
+ if (!priv->num_xdp_queues)
+ return;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct gve_rx_ring *rx = &priv->rx[i];
+
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+ if (rx->xsk_pool) {
+ xdp_rxq_info_unreg(&rx->xsk_rxq);
+ rx->xsk_pool = NULL;
+ }
+ }
+
+ for (i = 0; i < priv->num_xdp_queues; i++) {
+ tx_qid = gve_xdp_tx_queue_id(priv, i);
+ priv->tx[tx_qid].xsk_pool = NULL;
+ }
+}
+
+static void gve_drain_page_cache(struct gve_priv *priv)
+{
+ struct page_frag_cache *nc;
+ int i;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ nc = &priv->rx[i].page_cache;
+ if (nc->va) {
+ __page_frag_cache_drain(virt_to_page(nc->va),
+ nc->pagecnt_bias);
+ nc->va = NULL;
+ }
+ }
+}
+
static int gve_open(struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
int err;
+ if (priv->xdp_prog)
+ priv->num_xdp_queues = priv->rx_cfg.num_queues;
+ else
+ priv->num_xdp_queues = 0;
+
err = gve_alloc_qpls(priv);
if (err)
return err;
@@ -989,6 +1301,10 @@ static int gve_open(struct net_device *dev)
if (err)
goto free_rings;
+ err = gve_reg_xdp_info(priv, dev);
+ if (err)
+ goto free_rings;
+
err = gve_register_qpls(priv);
if (err)
goto reset;
@@ -1043,6 +1359,7 @@ static int gve_close(struct net_device *dev)
netif_carrier_off(dev);
if (gve_get_device_rings_ok(priv)) {
gve_turndown(priv);
+ gve_drain_page_cache(priv);
err = gve_destroy_rings(priv);
if (err)
goto err;
@@ -1053,6 +1370,7 @@ static int gve_close(struct net_device *dev)
}
del_timer_sync(&priv->stats_report_timer);
+ gve_unreg_xdp_info(priv);
gve_free_rings(priv);
gve_free_qpls(priv);
priv->interface_down_cnt++;
@@ -1069,6 +1387,306 @@ err:
return gve_reset_recovery(priv, false);
}
+static int gve_remove_xdp_queues(struct gve_priv *priv)
+{
+ int err;
+
+ err = gve_destroy_xdp_rings(priv);
+ if (err)
+ return err;
+
+ err = gve_unregister_xdp_qpls(priv);
+ if (err)
+ return err;
+
+ gve_unreg_xdp_info(priv);
+ gve_free_xdp_rings(priv);
+ gve_free_xdp_qpls(priv);
+ priv->num_xdp_queues = 0;
+ return 0;
+}
+
+static int gve_add_xdp_queues(struct gve_priv *priv)
+{
+ int err;
+
+ priv->num_xdp_queues = priv->tx_cfg.num_queues;
+
+ err = gve_alloc_xdp_qpls(priv);
+ if (err)
+ goto err;
+
+ err = gve_alloc_xdp_rings(priv);
+ if (err)
+ goto free_xdp_qpls;
+
+ err = gve_reg_xdp_info(priv, priv->dev);
+ if (err)
+ goto free_xdp_rings;
+
+ err = gve_register_xdp_qpls(priv);
+ if (err)
+ goto free_xdp_rings;
+
+ err = gve_create_xdp_rings(priv);
+ if (err)
+ goto free_xdp_rings;
+
+ return 0;
+
+free_xdp_rings:
+ gve_free_xdp_rings(priv);
+free_xdp_qpls:
+ gve_free_xdp_qpls(priv);
+err:
+ priv->num_xdp_queues = 0;
+ return err;
+}
+
+static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
+{
+ if (!gve_get_napi_enabled(priv))
+ return;
+
+ if (link_status == netif_carrier_ok(priv->dev))
+ return;
+
+ if (link_status) {
+ netdev_info(priv->dev, "Device link is up.\n");
+ netif_carrier_on(priv->dev);
+ } else {
+ netdev_info(priv->dev, "Device link is down.\n");
+ netif_carrier_off(priv->dev);
+ }
+}
+
+static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *old_prog;
+ int err = 0;
+ u32 status;
+
+ old_prog = READ_ONCE(priv->xdp_prog);
+ if (!netif_carrier_ok(priv->dev)) {
+ WRITE_ONCE(priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ return 0;
+ }
+
+ gve_turndown(priv);
+ if (!old_prog && prog) {
+ /* Allocate XDP TX queues if an XDP program is
+ * being installed */
+ err = gve_add_xdp_queues(priv);
+ if (err)
+ goto out;
+ } else if (old_prog && !prog) {
+ /* Remove XDP TX queues if an XDP program is
+ * being uninstalled */
+ err = gve_remove_xdp_queues(priv);
+ if (err)
+ goto out;
+ }
+ WRITE_ONCE(priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+out:
+ gve_turnup(priv);
+ status = ioread32be(&priv->reg_bar0->device_status);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+ return err;
+}
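The install/uninstall asymmetry above reduces to three cases: XDP tx queues exist exactly while a program is installed, and a pointer swap alone suffices when one program replaces another. A small decision-table sketch (names are illustrative, not driver code):

#include <stdio.h>

enum xdp_change { XDP_INSTALL, XDP_UNINSTALL, XDP_REPLACE, XDP_NOOP };

static enum xdp_change classify(const void *old_prog, const void *new_prog)
{
    if (!old_prog && new_prog)
        return XDP_INSTALL;    /* allocate + register XDP tx queues */
    if (old_prog && !new_prog)
        return XDP_UNINSTALL;  /* tear the XDP tx queues down */
    if (old_prog && new_prog)
        return XDP_REPLACE;    /* queues stay, just swap the prog */
    return XDP_NOOP;
}

int main(void)
{
    int prog_a, prog_b;

    printf("%d %d %d %d\n",
           classify(NULL, &prog_a), classify(&prog_a, NULL),
           classify(&prog_a, &prog_b), classify(NULL, NULL));
    return 0;
}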
+
+static int gve_xsk_pool_enable(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct napi_struct *napi;
+ struct gve_rx_ring *rx;
+ int tx_qid;
+ int err;
+
+ if (qid >= priv->rx_cfg.num_queues) {
+ dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
+ return -EINVAL;
+ }
+ if (xsk_pool_get_rx_frame_size(pool) <
+ priv->dev->max_mtu + sizeof(struct ethhdr)) {
+ dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
+ return -EINVAL;
+ }
+
+ err = xsk_pool_dma_map(pool, &priv->pdev->dev,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ /* If XDP prog is not installed, return */
+ if (!priv->xdp_prog)
+ return 0;
+
+ rx = &priv->rx[qid];
+ napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
+ if (err)
+ goto err;
+
+ err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err)
+ goto err;
+
+ xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
+ rx->xsk_pool = pool;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ priv->tx[tx_qid].xsk_pool = pool;
+
+ return 0;
+err:
+ if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
+ xdp_rxq_info_unreg(&rx->xsk_rxq);
+
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ return err;
+}
+
+static int gve_xsk_pool_disable(struct net_device *dev,
+ u16 qid)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct napi_struct *napi_rx;
+ struct napi_struct *napi_tx;
+ struct xsk_buff_pool *pool;
+ int tx_qid;
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (!pool)
+ return -EINVAL;
+ if (qid >= priv->rx_cfg.num_queues)
+ return -EINVAL;
+
+ /* If XDP prog is not installed, unmap DMA and return */
+ if (!priv->xdp_prog)
+ goto done;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ if (!netif_running(dev)) {
+ priv->rx[qid].xsk_pool = NULL;
+ xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+ priv->tx[tx_qid].xsk_pool = NULL;
+ goto done;
+ }
+
+ napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
+ napi_disable(napi_rx); /* make sure current rx poll is done */
+
+ napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
+ napi_disable(napi_tx); /* make sure current tx poll is done */
+
+ priv->rx[qid].xsk_pool = NULL;
+ xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+ priv->tx[tx_qid].xsk_pool = NULL;
+ smp_mb(); /* Make sure it is visible to the datapath workers */
+
+ napi_enable(napi_rx);
+ if (gve_rx_work_pending(&priv->rx[qid]))
+ napi_schedule(napi_rx);
+
+ napi_enable(napi_tx);
+ if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+ napi_schedule(napi_tx);
+
+done:
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ return 0;
+}
+
+static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+
+ if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
+ return -EINVAL;
+
+ if (flags & XDP_WAKEUP_TX) {
+ struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
+ struct napi_struct *napi =
+ &priv->ntfy_blocks[tx->ntfy_id].napi;
+
+ if (!napi_if_scheduled_mark_missed(napi)) {
+ /* Call local_bh_enable to trigger SoftIRQ processing */
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+ }
+
+ tx->xdp_xsk_wakeup++;
+ }
+
+ return 0;
+}
+
+static int verify_xdp_configuration(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (dev->features & NETIF_F_LRO) {
+ netdev_warn(dev, "XDP is not supported when LRO is on.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
+ netdev_warn(dev, "XDP is not supported in mode %d.\n",
+ priv->queue_format);
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ netdev_warn(dev, "XDP is not supported for mtu %d.\n",
+ dev->mtu);
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
+ (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
+ netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
+ priv->rx_cfg.num_queues,
+ priv->tx_cfg.num_queues,
+ priv->tx_cfg.max_queues);
+ return -EINVAL;
+ }
+ return 0;
+}
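For 4 KiB pages the MTU gate above works out to just over 2 KB: in QPL mode each rx buffer is half a page, and XDP needs the whole frame in that single fragment. A standalone computation, assuming GVE_RX_PAD is 2 as defined in gve_desc.h:

#include <stdio.h>

#define PAGE_SZ    4096
#define ETH_HLEN   14
#define GVE_RX_PAD 2   /* assumed value, from gve_desc.h */

int main(void)
{
    printf("max XDP mtu = %d\n", PAGE_SZ / 2 - ETH_HLEN - GVE_RX_PAD);
    return 0;
}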
+
+static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = verify_xdp_configuration(dev);
+ if (err)
+ return err;
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return gve_set_xdp(priv, xdp->prog, xdp->extack);
+ case XDP_SETUP_XSK_POOL:
+ if (xdp->xsk.pool)
+ return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
+ else
+ return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
+
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config)
@@ -1118,7 +1736,7 @@ static void gve_turndown(struct gve_priv *priv)
return;
/* Disable napi to prevent more work from coming in */
- for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
@@ -1146,7 +1764,7 @@ static void gve_turnup(struct gve_priv *priv)
netif_tx_start_all_queues(priv->dev);
/* Enable napi and unmask interrupts for all queues */
- for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
@@ -1263,6 +1881,9 @@ static const struct net_device_ops gve_netdev_ops = {
.ndo_get_stats64 = gve_get_stats,
.ndo_tx_timeout = gve_tx_timeout,
.ndo_set_features = gve_set_features,
+ .ndo_bpf = gve_xdp,
+ .ndo_xdp_xmit = gve_xdp_xmit,
+ .ndo_xsk_wakeup = gve_xsk_wakeup,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -1306,7 +1927,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
be64_add_cpu(&priv->stats_report->written_count, 1);
/* tx stats */
if (priv->tx) {
- for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
u32 last_completion = 0;
u32 tx_frames = 0;
@@ -1369,23 +1990,6 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
}
-static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
-{
- if (!gve_get_napi_enabled(priv))
- return;
-
- if (link_status == netif_carrier_ok(priv->dev))
- return;
-
- if (link_status) {
- netdev_info(priv->dev, "Device link is up.\n");
- netif_carrier_on(priv->dev);
- } else {
- netdev_info(priv->dev, "Device link is down.\n");
- netif_carrier_off(priv->dev);
- }
-}
-
/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
@@ -1399,6 +2003,18 @@ static void gve_service_task(struct work_struct *work)
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
+static void gve_set_netdev_xdp_features(struct gve_priv *priv)
+{
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+ priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ } else {
+ priv->dev->xdp_features = 0;
+ }
+}
+
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
int num_ntfy;
@@ -1477,6 +2093,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
}
setup_device:
+ gve_set_netdev_xdp_features(priv);
err = gve_setup_device_resources(priv);
if (!err)
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 1f55137722b0..d1da7413dc4d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -8,6 +8,9 @@
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/etherdevice.h>
+#include <linux/filter.h>
+#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
static void gve_rx_free_buffer(struct device *dev,
struct gve_rx_slot_page_info *page_info,
@@ -124,7 +127,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return -ENOMEM;
if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(priv);
+ rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
if (!rx->data.qpl) {
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
@@ -556,7 +559,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
if (len <= priv->rx_copybreak && is_only_frag) {
/* Just copy small packets */
- skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
+ skb = gve_rx_copy(netdev, napi, page_info, len);
if (skb) {
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
@@ -591,6 +594,107 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
return skb;
}
+static int gve_xsk_pool_redirect(struct net_device *dev,
+ struct gve_rx_ring *rx,
+ void *data, int len,
+ struct bpf_prog *xdp_prog)
+{
+ struct xdp_buff *xdp;
+ int err;
+
+ if (rx->xsk_pool->frame_len < len)
+ return -E2BIG;
+ xdp = xsk_buff_alloc(rx->xsk_pool);
+ if (!xdp) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_alloc_fails++;
+ u64_stats_update_end(&rx->statss);
+ return -ENOMEM;
+ }
+ xdp->data_end = xdp->data + len;
+ memcpy(xdp->data, data, len);
+ err = xdp_do_redirect(dev, xdp, xdp_prog);
+ if (err)
+ xsk_buff_free(xdp);
+ return err;
+}
+
+static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
+ struct xdp_buff *orig, struct bpf_prog *xdp_prog)
+{
+ int total_len, len = orig->data_end - orig->data;
+ int headroom = XDP_PACKET_HEADROOM;
+ struct xdp_buff new;
+ void *frame;
+ int err;
+
+ if (rx->xsk_pool)
+ return gve_xsk_pool_redirect(dev, rx, orig->data,
+ len, xdp_prog);
+
+ total_len = headroom + SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
+ if (!frame) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_alloc_fails++;
+ u64_stats_update_end(&rx->statss);
+ return -ENOMEM;
+ }
+ xdp_init_buff(&new, total_len, &rx->xdp_rxq);
+ xdp_prepare_buff(&new, frame, headroom, len, false);
+ memcpy(new.data, orig->data, len);
+
+ err = xdp_do_redirect(dev, &new, xdp_prog);
+ if (err)
+ page_frag_free(frame);
+
+ return err;
+}
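The bounce-buffer sizing above mirrors what the kernel computes with SKB_DATA_ALIGN() and sizeof(struct skb_shared_info): redirected frames are copied out of the registered QPL page into a freshly allocated fragment with standard XDP headroom. A standalone approximation (the alignment and shared-info size are stand-in values, not the real build-time constants):

#include <stdio.h>

#define XDP_PACKET_HEADROOM 256
#define SMP_CACHE_BYTES     64
#define SHARED_INFO_SZ      320  /* assumed, arch dependent */
#define ALIGN_UP(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    int len = 1500;
    int total = XDP_PACKET_HEADROOM + ALIGN_UP(len, SMP_CACHE_BYTES) +
                ALIGN_UP(SHARED_INFO_SZ, SMP_CACHE_BYTES);

    printf("frame %d -> page_frag_alloc(%d)\n", len, total);
    return 0;
}

The copy is the price of QPL mode: the NIC's buffers belong to a pre-registered page list, so they cannot be handed to another driver's tx path directly.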
+
+static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act)
+{
+ struct gve_tx_ring *tx;
+ int tx_qid;
+ int err;
+
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ break;
+ case XDP_TX:
+ tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+ tx = &priv->tx[tx_qid];
+ spin_lock(&tx->xdp_lock);
+ err = gve_xdp_xmit_one(priv, tx, xdp->data,
+ xdp->data_end - xdp->data, NULL);
+ spin_unlock(&tx->xdp_lock);
+
+ if (unlikely(err)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_tx_errors++;
+ u64_stats_update_end(&rx->statss);
+ }
+ break;
+ case XDP_REDIRECT:
+ err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);
+
+ if (unlikely(err)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+ }
+ break;
+ }
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+}
+
#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
struct gve_rx_desc *desc, u32 idx,
@@ -603,9 +707,12 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
union gve_rx_data_slot *data_slot;
struct gve_priv *priv = rx->gve;
struct sk_buff *skb = NULL;
+ struct bpf_prog *xprog;
+ struct xdp_buff xdp;
dma_addr_t page_bus;
void *va;
+ u16 len = frag_size;
struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
bool is_first_frag = ctx->frag_cnt == 0;
@@ -645,9 +752,35 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
PAGE_SIZE, DMA_FROM_DEVICE);
page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
+ len -= page_info->pad;
frag_size -= page_info->pad;
- skb = gve_rx_skb(priv, rx, page_info, napi, frag_size,
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (xprog && is_only_frag) {
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_info->page_address +
+ page_info->page_offset, GVE_RX_PAD,
+ len, false);
+ old_data = xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
+ ctx->total_size += frag_size;
+ goto finish_ok_pkt;
+ }
+
+ page_info->pad += xdp.data - old_data;
+ len = xdp.data_end - xdp.data;
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ skb = gve_rx_skb(priv, rx, page_info, napi, len,
data_slot, is_only_frag);
if (!skb) {
u64_stats_update_begin(&rx->statss);
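When the program returns XDP_PASS after a bpf_xdp_adjust_head(), the pad/len updates above keep the subsequent gve_rx_skb()/gve_rx_copy() pointed at the adjusted payload. A toy walk-through of that bookkeeping (numbers are illustrative):

#include <stdio.h>

int main(void)
{
    int pad = 2, len = 1514;  /* GVE_RX_PAD and incoming frame length */
    int head_adjust = 14;     /* e.g. the program popped the Ethernet header */

    pad += head_adjust;       /* page_info->pad += xdp.data - old_data */
    len -= head_adjust;       /* len = xdp.data_end - xdp.data */
    printf("copy from offset %d, length %d\n", pad, len);
    return 0;
}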
@@ -773,6 +906,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
netdev_features_t feat)
{
+ u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+ u64 xdp_txs = rx->xdp_actions[XDP_TX];
struct gve_rx_ctx *ctx = &rx->ctx;
struct gve_priv *priv = rx->gve;
struct gve_rx_cnts cnts = {0};
@@ -820,6 +955,12 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
u64_stats_update_end(&rx->statss);
}
+ if (xdp_txs != rx->xdp_actions[XDP_TX])
+ gve_xdp_tx_flush(priv, rx->q_num);
+
+ if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+ xdp_do_flush();
+
/* restock ring slots */
if (!rx->data.raw_addressing) {
/* In QPL mode buffs are refilled as the desc are processed */
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 630f42a3037b..e57b73eb70f6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
- &buf_state->page_info, buf_len, 0);
+ &buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4888bf05fbed..e50510b8e784 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -11,6 +11,7 @@
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
+#include <net/xdp_sock_drv.h>
static inline void gve_tx_put_doorbell(struct gve_priv *priv,
struct gve_queue_resources *q_resources,
@@ -19,6 +20,14 @@ static inline void gve_tx_put_doorbell(struct gve_priv *priv,
iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}
+void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
+{
+ u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+ struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+}
+
/* gvnic can only transmit from a Registered Segment.
* We copy skb payloads into the registered segment before writing Tx
* descriptors and ringing the Tx doorbell.
@@ -132,6 +141,58 @@ static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
atomic_add(bytes, &fifo->available);
}
+static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
+{
+ size_t space_freed = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
+ space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
+ info->iov[i].iov_len = 0;
+ info->iov[i].iov_padding = 0;
+ }
+ return space_freed;
+}
+
+static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
+ u32 to_do)
+{
+ struct gve_tx_buffer_state *info;
+ u32 clean_end = tx->done + to_do;
+ u64 pkts = 0, bytes = 0;
+ size_t space_freed = 0;
+ u32 xsk_complete = 0;
+ u32 idx;
+
+ for (; tx->done < clean_end; tx->done++) {
+ idx = tx->done & tx->mask;
+ info = &tx->info[idx];
+
+ if (unlikely(!info->xdp.size))
+ continue;
+
+ bytes += info->xdp.size;
+ pkts++;
+ xsk_complete += info->xdp.is_xsk;
+
+ info->xdp.size = 0;
+ if (info->xdp_frame) {
+ xdp_return_frame(info->xdp_frame);
+ info->xdp_frame = NULL;
+ }
+ space_freed += gve_tx_clear_buffer_state(info);
+ }
+
+ gve_tx_free_fifo(&tx->tx_fifo, space_freed);
+ if (xsk_complete > 0 && tx->xsk_pool)
+ xsk_tx_completed(tx->xsk_pool, xsk_complete);
+ u64_stats_update_begin(&tx->statss);
+ tx->bytes_done += bytes;
+ tx->pkt_done += pkts;
+ u64_stats_update_end(&tx->statss);
+ return pkts;
+}
+
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake);
@@ -144,8 +205,12 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
- netdev_tx_reset_queue(tx->netdev_txq);
+ if (tx->q_num < priv->tx_cfg.num_queues) {
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ } else {
+ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
+ }
dma_free_coherent(hdev, sizeof(*tx->q_resources),
tx->q_resources, tx->q_resources_bus);
@@ -177,6 +242,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
/* Make sure everything is zeroed to start */
memset(tx, 0, sizeof(*tx));
spin_lock_init(&tx->clean_lock);
+ spin_lock_init(&tx->xdp_lock);
tx->q_num = idx;
tx->mask = slots - 1;
@@ -195,7 +261,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
tx->dev = &priv->pdev->dev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
+ tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
/* map Tx FIFO */
@@ -213,7 +279,8 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
(unsigned long)tx->bus);
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ if (idx < priv->tx_cfg.num_queues)
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_tx_add_to_block(priv, idx);
return 0;
@@ -233,12 +300,12 @@ abort_with_info:
return -ENOMEM;
}
-int gve_tx_alloc_rings(struct gve_priv *priv)
+int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
{
int err = 0;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = start_id; i < start_id + num_rings; i++) {
err = gve_tx_alloc_ring(priv, i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -251,17 +318,17 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
if (err) {
int j;
- for (j = 0; j < i; j++)
+ for (j = start_id; j < i; j++)
gve_tx_free_ring(priv, j);
}
return err;
}
-void gve_tx_free_rings_gqi(struct gve_priv *priv)
+void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
{
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++)
+ for (i = start_id; i < start_id + num_rings; i++)
gve_tx_free_ring(priv, i);
}
@@ -374,18 +441,18 @@ static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
}
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
- struct sk_buff *skb, bool is_gso,
+ u16 csum_offset, u8 ip_summed, bool is_gso,
int l4_hdr_offset, u32 desc_cnt,
- u16 hlen, u64 addr)
+ u16 hlen, u64 addr, u16 pkt_len)
{
/* l4_hdr_offset and csum_offset are in units of 16-bit words */
if (is_gso) {
pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
- pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+ pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
- } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ } else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
- pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+ pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
} else {
pkt_desc->pkt.type_flags = GVE_TXD_STD;
@@ -393,7 +460,7 @@ static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
pkt_desc->pkt.l4_hdr_offset = 0;
}
pkt_desc->pkt.desc_cnt = desc_cnt;
- pkt_desc->pkt.len = cpu_to_be16(skb->len);
+ pkt_desc->pkt.len = cpu_to_be16(pkt_len);
pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}
@@ -412,15 +479,16 @@ static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
}
static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
- struct sk_buff *skb, bool is_gso,
+ u16 l3_offset, u16 gso_size,
+ bool is_gso_v6, bool is_gso,
u16 len, u64 addr)
{
seg_desc->seg.type_flags = GVE_TXD_SEG;
if (is_gso) {
- if (skb_is_gso_v6(skb))
+ if (is_gso_v6)
seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
- seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
- seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ seg_desc->seg.l3_offset = l3_offset >> 1;
+ seg_desc->seg.mss = cpu_to_be16(gso_size);
}
seg_desc->seg.seg_len = cpu_to_be16(len);
seg_desc->seg.seg_addr = cpu_to_be64(addr);
@@ -473,9 +541,10 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
&info->iov[payload_iov]);
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+ gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
+ is_gso, l4_hdr_offset,
1 + mtd_desc_nr + payload_nfrags, hlen,
- info->iov[hdr_nfrags - 1].iov_offset);
+ info->iov[hdr_nfrags - 1].iov_offset, skb->len);
skb_copy_bits(skb, 0,
tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
@@ -494,7 +563,9 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
seg_desc = &tx->desc[next_idx];
- gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
+ gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+ skb_shinfo(skb)->gso_size,
+ skb_is_gso_v6(skb), is_gso,
info->iov[i].iov_len,
info->iov[i].iov_offset);
@@ -552,8 +623,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
if (mtd_desc_nr)
num_descriptors++;
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- num_descriptors, hlen, addr);
+ gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
+ is_gso, l4_hdr_offset,
+ num_descriptors, hlen, addr, skb->len);
if (mtd_desc_nr) {
idx = (idx + 1) & tx->mask;
@@ -569,7 +641,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
addr += hlen;
idx = (idx + 1) & tx->mask;
seg_desc = &tx->desc[idx];
- gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
+ gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+ skb_shinfo(skb)->gso_size,
+ skb_is_gso_v6(skb), is_gso, len, addr);
}
for (i = 0; i < shinfo->nr_frags; i++) {
@@ -587,7 +661,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
dma_unmap_len_set(&tx->info[idx], len, len);
dma_unmap_addr_set(&tx->info[idx], dma, addr);
- gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
+ gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+ skb_shinfo(skb)->gso_size,
+ skb_is_gso_v6(skb), is_gso, len, addr);
}
return num_descriptors;
@@ -648,6 +724,103 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void *data, int len, void *frame_p, bool is_xsk)
+{
+ int pad, nfrags, ndescs, iovi, offset;
+ struct gve_tx_buffer_state *info;
+ u32 reqi = tx->req;
+
+ pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
+ if (pad >= GVE_TX_MAX_HEADER_SIZE)
+ pad = 0;
+ info = &tx->info[reqi & tx->mask];
+ info->xdp_frame = frame_p;
+ info->xdp.size = len;
+ info->xdp.is_xsk = is_xsk;
+
+ nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
+ &info->iov[0]);
+ iovi = pad > 0;
+ ndescs = nfrags - iovi;
+ offset = 0;
+
+ while (iovi < nfrags) {
+ if (!offset)
+ gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
+ CHECKSUM_NONE, false, 0, ndescs,
+ info->iov[iovi].iov_len,
+ info->iov[iovi].iov_offset, len);
+ else
+ gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
+ 0, 0, false, false,
+ info->iov[iovi].iov_len,
+ info->iov[iovi].iov_offset);
+
+ memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
+ data + offset, info->iov[iovi].iov_len);
+ gve_dma_sync_for_device(&priv->pdev->dev,
+ tx->tx_fifo.qpl->page_buses,
+ info->iov[iovi].iov_offset,
+ info->iov[iovi].iov_len);
+ offset += info->iov[iovi].iov_len;
+ iovi++;
+ reqi++;
+ }
+
+ return ndescs;
+}
+
+int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->num_xdp_queues);
+
+ tx = &priv->tx[qid];
+
+ spin_lock(&tx->xdp_lock);
+ for (i = 0; i < n; i++) {
+ err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
+ frames[i]->len, frames[i]);
+ if (err)
+ break;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+
+ spin_unlock(&tx->xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xmit += n;
+ tx->xdp_xmit_errors += n - i;
+ u64_stats_update_end(&tx->statss);
+
+ return i ? i : err;
+}
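The per-ring xdp_lock matters because two producers can target the same XDP tx ring: ndo_xdp_xmit spreads senders by CPU id, while XDP_TX from the rx path always uses the tx queue paired with its rx ring. A standalone sketch of the two mappings colliding (queue counts are made up; the linear pairing is inferred from how gve_xdp_tx_queue_id() is used in this patch):

#include <stdio.h>

static int xmit_qid(int cpu, int num_xdp, int num_netdev_tx)
{
    return num_netdev_tx + cpu % num_xdp;  /* gve_xdp_xmit() */
}

static int xdp_tx_qid(int rx_q, int num_netdev_tx)
{
    return num_netdev_tx + rx_q;           /* XDP_TX in gve_xdp_done() */
}

int main(void)
{
    int num_netdev_tx = 8, num_xdp = 8;

    /* CPU 11 redirecting and rx queue 3 doing XDP_TX land on the same ring */
    printf("cpu 11 -> tx %d, rx 3 -> tx %d\n",
           xmit_qid(11, num_xdp, num_netdev_tx),
           xdp_tx_qid(3, num_netdev_tx));
    return 0;
}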
+
+int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void *data, int len, void *frame_p)
+{
+ int nsegs;
+
+ if (!gve_can_tx(tx, len + GVE_TX_MAX_HEADER_SIZE - 1))
+ return -EBUSY;
+
+ nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
+ tx->req += nsegs;
+
+ return 0;
+}
+
#define GVE_TX_START_THRESH PAGE_SIZE
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
@@ -657,8 +830,8 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u64 pkts = 0, bytes = 0;
size_t space_freed = 0;
struct sk_buff *skb;
- int i, j;
u32 idx;
+ int j;
for (j = 0; j < to_do; j++) {
idx = tx->done & tx->mask;
@@ -680,12 +853,7 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
dev_consume_skb_any(skb);
if (tx->raw_addressing)
continue;
- /* FIFO free */
- for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
- space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
- info->iov[i].iov_len = 0;
- info->iov[i].iov_padding = 0;
- }
+ space_freed += gve_tx_clear_buffer_state(info);
}
}
@@ -720,6 +888,70 @@ u32 gve_tx_load_event_counter(struct gve_priv *priv,
return be32_to_cpu(counter);
}
+static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int budget)
+{
+ struct xdp_desc desc;
+ int sent = 0, nsegs;
+ void *data;
+
+ spin_lock(&tx->xdp_lock);
+ while (sent < budget) {
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH))
+ goto out;
+
+ if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
+ tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
+ goto out;
+ }
+
+ data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
+ nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
+ tx->req += nsegs;
+ sent++;
+ }
+out:
+ if (sent > 0) {
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+ xsk_tx_release(tx->xsk_pool);
+ }
+ spin_unlock(&tx->xdp_lock);
+ return sent;
+}
+
+bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+{
+ struct gve_priv *priv = block->priv;
+ struct gve_tx_ring *tx = block->tx;
+ u32 nic_done;
+ bool repoll;
+ u32 to_do;
+
+ /* If budget is 0, do all the work */
+ if (budget == 0)
+ budget = INT_MAX;
+
+ /* Find out how much work there is to be done */
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_xdp_done(priv, tx, to_do);
+ repoll = nic_done != tx->done;
+
+ if (tx->xsk_pool) {
+ int sent = gve_xsk_tx(priv, tx, budget);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+ repoll |= (sent == budget);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ }
+
+ /* If we still have work we want to repoll */
+ return repoll;
+}
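The to_do computation above relies on unsigned arithmetic over a free-running event counter: nic_done - tx->done yields the outstanding work even after the 32-bit counter wraps. A standalone check of that property:

#include <stdio.h>

typedef unsigned int u32;

static u32 min_u32(u32 a, u32 b) { return a < b ? a : b; }

int main(void)
{
    u32 done = 0xfffffff0u;      /* consumer is about to wrap */
    u32 nic_done = 0x00000010u;  /* device is 0x20 completions ahead */
    u32 budget = 64;

    printf("to_do = %u\n", min_u32(nic_done - done, budget)); /* 32 */
    return 0;
}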
+
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 6ba46adaaee3..26e08d753270 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -49,10 +49,10 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
}
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len,
- u16 padding)
+ struct gve_rx_slot_page_info *page_info, u16 len)
{
- void *va = page_info->page_address + padding + page_info->page_offset;
+ void *va = page_info->page_address + page_info->page_offset +
+ page_info->pad;
struct sk_buff *skb;
skb = napi_alloc_skb(napi, len);
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 79595940b351..324fd98a6112 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -18,8 +18,7 @@ void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len,
- u16 pad);
+ struct gve_rx_slot_page_info *page_info, u16 len);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 25be7f8ac7cd..5caea154362f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -13,7 +13,6 @@
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 07ad5f35219e..c3851a6e10c0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11365,7 +11365,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM;
- goto err_clr_master;
+ goto err_release_regions;
}
ret = hclge_dev_mem_map(hdev);
@@ -11378,8 +11378,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
err_unmap_io_base:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
-err_clr_master:
- pci_clear_master(pdev);
+err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -11396,7 +11395,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
- pci_clear_master(pdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
@@ -11743,7 +11741,6 @@ err_devlink_uninit:
hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
out:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e84e5be8e59e..f24046250341 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2598,7 +2598,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
if (!hw->hw.io_base) {
dev_err(&pdev->dev, "can't map configuration register space\n");
ret = -ENOMEM;
- goto err_clr_master;
+ goto err_release_regions;
}
ret = hclgevf_dev_mem_map(hdev);
@@ -2609,8 +2609,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
err_unmap_io_base:
pci_iounmap(pdev, hdev->hw.hw.io_base);
-err_clr_master:
- pci_clear_master(pdev);
+err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -2626,7 +2625,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
pci_iounmap(pdev, hdev->hw.hw.io_base);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index c18c3b373846..9bc0a9519899 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -139,23 +139,6 @@ config IGBVF
To compile this driver as a module, choose M here. The module
will be called igbvf.
-config IXGB
- tristate "Intel(R) PRO/10GbE support"
- depends on PCI
- help
- This driver supports Intel(R) PRO/10GbE family of adapters for
- PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
- instead. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide that can be located at:
-
- <http://support.intel.com>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/intel/ixgb.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called ixgb.
-
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 3075290063f6..d80d04132073 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
-obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_IAVF) += iavf/
obj-$(CONFIG_FM10K) += fm10k/
obj-$(CONFIG_ICE) += ice/
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e1eb1de88bf9..6f5c16aebcbf 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -23,7 +23,6 @@
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/suspend.h>
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 027d721feb18..d748b98274e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -3,7 +3,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/aer.h>
#include "fm10k.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 60ce4d15d82a..6e310a539467 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -10,7 +10,6 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/iommu.h>
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4934ff58332c..afc4fa8c66af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5402,6 +5402,13 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if ((changed_flags & I40E_FLAG_LEGACY_RX) &&
+ I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_warn(&pf->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+ return -EOPNOTSUPP;
+ }
+
if ((changed_flags & new_flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(new_flags & I40E_FLAG_MFP_ENABLED))
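
The new ethtool guard above rejects the legacy-rx flag whenever I40E_2K_TOO_SMALL_WITH_PADDING evaluates true, i.e. when a padded 1536-byte buffer no longer fits in a 2K buffer once the trailing skb_shared_info is accounted for. A minimal userspace sketch of that check follows; the constants are assumptions for a typical x86_64 build (64-byte cachelines, roughly 320-byte skb_shared_info), not values read from any particular kernel:

#include <stdio.h>

#define L1_CACHE_BYTES       64
#define NET_SKB_PAD          64   /* max(32, L1_CACHE_BYTES) */
#define SHINFO_SIZE          320  /* approx sizeof(struct skb_shared_info), assumed */
#define SKB_DATA_ALIGN(x)    (((x) + (L1_CACHE_BYTES - 1)) & ~(L1_CACHE_BYTES - 1))
#define RXBUFFER_1536        1536
#define RXBUFFER_2048        2048
#define SKB_WITH_OVERHEAD(x) ((x) - SKB_DATA_ALIGN(SHINFO_SIZE))

int main(void)
{
	int too_small = (NET_SKB_PAD + RXBUFFER_1536) >
			SKB_WITH_OVERHEAD(RXBUFFER_2048);

	/* 64 + 1536 = 1600 vs 2048 - 320 = 1728 -> fits on this config */
	printf("2k too small with padding: %s\n", too_small ? "yes" : "no");
	return 0;
}

On this configuration 1600 <= 1728, so legacy-rx remains settable; builds with larger cacheline alignment or a bigger skb_shared_info can trip the guard.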
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 228cd502bb48..c8ff5675b29d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2896,15 +2896,35 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
/**
- * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * i40e_calculate_vsi_rx_buf_len - Calculate the Rx buffer length

+ *
+ * @vsi: VSI to calculate rx_buf_len from
+ */
+static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
+{
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
+
+ return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
+}
+
+/**
+ * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI
* @vsi: the vsi
+ * @xdp_prog: XDP program
**/
-static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi,
+ struct bpf_prog *xdp_prog)
{
- if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
- return I40E_RXBUFFER_2048;
+ u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+ u16 chain_len;
+
+ if (xdp_prog && !xdp_prog->aux->xdp_has_frags)
+ chain_len = 1;
else
- return I40E_RXBUFFER_3072;
+ chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+
+ return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);
}
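
To see what i40e_max_vsi_frame_size() yields in practice, here is a rough userspace model of the same arithmetic. The constants (I40E_MAX_CHAINED_RX_BUFFERS = 5, I40E_MAX_RXBUFFER = 9728) are believed to match the i40e headers but should be verified against the tree:

#include <stdio.h>

#define MAX_CHAINED_RX_BUFFERS 5    /* assumed I40E_MAX_CHAINED_RX_BUFFERS */
#define MAX_RXBUFFER           9728 /* assumed I40E_MAX_RXBUFFER */

static unsigned int max_frame(unsigned int rx_buf_len, int xdp_has_frags)
{
	/* a frags-incapable XDP prog limits a packet to one buffer */
	unsigned int chain_len = xdp_has_frags ? MAX_CHAINED_RX_BUFFERS : 1;
	unsigned int frame = rx_buf_len * chain_len;

	return frame < MAX_RXBUFFER ? frame : MAX_RXBUFFER;
}

int main(void)
{
	/* frags-capable prog: min(3072 * 5, 9728) = 9728 */
	printf("%u\n", max_frame(3072, 1));
	/* single-buffer prog: min(3072 * 1, 9728) = 3072 */
	printf("%u\n", max_frame(3072, 0));
	return 0;
}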
/**
@@ -2919,12 +2939,13 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ int frame_size;
- if (i40e_enabled_xdp_vsi(vsi)) {
- int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
-
- if (frame_size > i40e_max_xdp_frame_size(vsi))
- return -EINVAL;
+ frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
+ if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) {
+ netdev_err(netdev, "Error changing mtu to %d, Max is %d\n",
+ new_mtu, frame_size - I40E_PACKET_HDR_PAD);
+ return -EINVAL;
}
netdev_dbg(netdev, "changing MTU from %d to %d\n",
@@ -3595,6 +3616,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
}
}
+ xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
@@ -3640,10 +3663,16 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
}
/* configure Rx buffer alignment */
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ if (I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_info(&vsi->back->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+ return -EOPNOTSUPP;
+ }
clear_ring_build_skb_enabled(ring);
- else
+ } else {
set_ring_build_skb_enabled(ring);
+ }
ring->rx_offset = i40e_rx_offset(ring);
@@ -3694,24 +3723,6 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
}
/**
- * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
- *
- * @vsi: VSI to calculate rx_buf_len from
- */
-static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
-{
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
- return I40E_RXBUFFER_2048;
-
-#if (PAGE_SIZE < 8192)
- if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
- return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
-#endif
-
- return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
-}
-
-/**
* i40e_vsi_configure_rx - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -3722,13 +3733,15 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
#if (PAGE_SIZE < 8192)
if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
- vsi->netdev->mtu <= ETH_DATA_LEN)
- vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ vsi->netdev->mtu <= ETH_DATA_LEN) {
+ vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ vsi->max_frame = vsi->rx_buf_len;
+ }
#endif
/* set up individual rings */
@@ -13316,15 +13329,15 @@ out_err:
static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int frame_size = i40e_max_vsi_frame_size(vsi, prog);
struct i40e_pf *pf = vsi->back;
struct bpf_prog *old_prog;
bool need_reset;
int i;
/* Don't allow frames that span over multiple buffers */
- if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
+ if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
return -EINVAL;
}
@@ -13810,7 +13823,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
} else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
* are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 79d587ad5409..33b4e30f5e00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -162,45 +162,45 @@ DECLARE_EVENT_CLASS(
TP_PROTO(struct i40e_ring *ring,
union i40e_16byte_rx_desc *desc,
- struct sk_buff *skb),
+ struct xdp_buff *xdp),
- TP_ARGS(ring, desc, skb),
+ TP_ARGS(ring, desc, xdp),
TP_STRUCT__entry(
__field(void*, ring)
__field(void*, desc)
- __field(void*, skb)
+ __field(void*, xdp)
__string(devname, ring->netdev->name)
),
TP_fast_assign(
__entry->ring = ring;
__entry->desc = desc;
- __entry->skb = skb;
+ __entry->xdp = xdp;
__assign_str(devname, ring->netdev->name);
),
TP_printk(
- "netdev: %s ring: %p desc: %p skb %p",
+ "netdev: %s ring: %p desc: %p xdp %p",
__get_str(devname), __entry->ring,
- __entry->desc, __entry->skb)
+ __entry->desc, __entry->xdp)
);
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
union i40e_16byte_rx_desc *desc,
- struct sk_buff *skb),
+ struct xdp_buff *xdp),
- TP_ARGS(ring, desc, skb));
+ TP_ARGS(ring, desc, xdp));
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
union i40e_16byte_rx_desc *desc,
- struct sk_buff *skb),
+ struct xdp_buff *xdp),
- TP_ARGS(ring, desc, skb));
+ TP_ARGS(ring, desc, xdp));
DECLARE_EVENT_CLASS(
i40e_xmit_template,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 72b091f2509d..c8c2cbaa0ede 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1477,9 +1477,6 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
-
if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
goto skip_free;
@@ -1524,6 +1521,7 @@ skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
}
@@ -1576,6 +1574,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
/* XDP RX-queue info only needed for RX rings exposed to XDP */
@@ -1617,21 +1616,19 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
writel(val, rx_ring->tail);
}
+#if (PAGE_SIZE >= 8192)
static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
unsigned int size)
{
unsigned int truesize;
-#if (PAGE_SIZE < 8192)
- truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
truesize = rx_ring->rx_offset ?
SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
-#endif
return truesize;
}
+#endif
/**
* i40e_alloc_mapped_page - recycle or make a new page
@@ -1970,7 +1967,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
* i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buffer: buffer containing the page
* @rx_stats: rx stats structure for the rx ring
- * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling i40e_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
@@ -1981,8 +1977,7 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
* or busy if it could not be reused.
*/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
- struct i40e_rx_queue_stats *rx_stats,
- int rx_buffer_pgcnt)
+ struct i40e_rx_queue_stats *rx_stats)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1995,7 +1990,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+ if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) {
rx_stats->page_busy_count++;
return false;
}
@@ -2021,33 +2016,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
}
/**
- * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buffer->page to the skb.
- * It will just attach the page as a frag to the skb.
- *
- * The function will then update the page offset.
+ * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
+ * @rx_buffer: Rx buffer to adjust
+ * @truesize: Size of adjustment
**/
-static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- struct sk_buff *skb,
- unsigned int size)
+static void i40e_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer,
+ unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
-#endif
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->page_offset, size, truesize);
-
- /* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
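
The retained XOR path above relies on truesize being a power of two equal to half the page, so flipping the offset ping-pongs between the page's two halves. A tiny standalone demo of that flip, with illustrative values only:

#include <stdio.h>

int main(void)
{
	unsigned int page_offset = 0;
	unsigned int truesize = 2048;	/* half of a 4K page, power of two */

	for (int i = 0; i < 4; i++) {
		printf("use offset %u\n", page_offset);
		page_offset ^= truesize;	/* 0 -> 2048 -> 0 -> ... */
	}
	return 0;
}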
@@ -2058,19 +2034,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
- * @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
- const unsigned int size,
- int *rx_buffer_pgcnt)
+ const unsigned int size)
{
struct i40e_rx_buffer *rx_buffer;
- rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- *rx_buffer_pgcnt =
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process);
+ rx_buffer->page_count =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
@@ -2092,25 +2066,82 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
}
/**
- * i40e_construct_skb - Allocate skb and populate it
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+ }
+}
+
+/**
+ * i40e_process_rx_buffs - Process buffers after the XDP program ran or on error
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp_res: Result of the XDP program
+ * @xdp: xdp_buff pointing to the data
+ **/
+static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ struct xdp_buff *xdp)
+{
+ u32 next = rx_ring->next_to_clean;
+ struct i40e_rx_buffer *rx_buffer;
+
+ xdp->flags = 0;
+
+ while (1) {
+ rx_buffer = i40e_rx_bi(rx_ring, next);
+ if (++next == rx_ring->count)
+ next = 0;
+
+ if (!rx_buffer->page)
+ continue;
+
+ if (xdp_res == I40E_XDP_CONSUMED)
+ rx_buffer->pagecnt_bias++;
+ else
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+
+ /* EOP buffer will be put in i40e_clean_rx_irq() */
+ if (next == rx_ring->next_to_process)
+ return;
+
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
+ }
+}
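
The loop in i40e_process_rx_buffs() walks every buffer index of the in-flight packet, from next_to_clean up to next_to_process, wrapping at the ring size; only the EOP buffer is left for the caller to put. A compact userspace model of that index walk, with a fabricated ring size:

#include <stdio.h>

int main(void)
{
	unsigned int count = 8;			/* ring size (made up) */
	unsigned int next_to_clean = 6;		/* first buffer of the packet */
	unsigned int next_to_process = 2;	/* one past the EOP buffer */
	unsigned int next = next_to_clean;

	while (1) {
		unsigned int idx = next;

		if (++next == count)
			next = 0;
		printf("process buffer %u\n", idx);	/* prints 6, 7, 0, 1 */
		if (next == next_to_process)
			break;
	}
	return 0;
}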
+
+/**
+ * i40e_construct_skb - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
+ * @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- struct xdp_buff *xdp)
+ struct xdp_buff *xdp,
+ u32 nr_frags)
{
unsigned int size = xdp->data_end - xdp->data;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
+ struct i40e_rx_buffer *rx_buffer;
unsigned int headlen;
struct sk_buff *skb;
@@ -2150,48 +2181,60 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
if (size) {
+ if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_add_rx_frag(skb, 0, rx_buffer->page,
rx_buffer->page_offset + headlen,
- size, truesize);
-
+ size, xdp->frame_sz);
/* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
} else {
/* buffer is unused, reset bias back to rx_buffer */
rx_buffer->pagecnt_bias++;
}
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+ xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+
+ /* First buffer has already been processed, so bump ntc */
+ if (++rx_ring->next_to_clean == rx_ring->count)
+ rx_ring->next_to_clean = 0;
+
+ i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
+ }
+
return skb;
}
/**
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buffer: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
+ * @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- struct xdp_buff *xdp)
+ struct xdp_buff *xdp,
+ u32 nr_frags)
{
unsigned int metasize = xdp->data - xdp->data_meta;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(xdp->data_end -
- xdp->data_hard_start);
-#endif
struct sk_buff *skb;
/* Prefetch first cache line of first page. If xdp->data_meta
@@ -2202,7 +2245,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
return NULL;
@@ -2212,42 +2255,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo;
- return skb;
-}
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
-/**
- * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
- *
- * This function will clean up the contents of the rx_buffer. It will
- * either recycle the buffer or unmap it and free the associated resources.
- */
-static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- int rx_buffer_pgcnt)
-{
- if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
- /* hand second half of page back to the ring */
- i40e_reuse_rx_page(rx_ring, rx_buffer);
+ i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- i40e_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
+ struct i40e_rx_buffer *rx_buffer;
+
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ /* buffer is used by skb, update page_offset */
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
}
+
+ return skb;
}
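
With napi_build_skb() now taking xdp->frame_sz directly as the truesize, the linear payload that fits in a single buffer on a 4K-page system is frame_sz minus headroom minus the trailing skb_shared_info. A back-of-the-envelope sketch; the headroom and shared-info sizes below are assumptions, not values pulled from the driver:

#include <stdio.h>

int main(void)
{
	unsigned int frame_sz = 2048;	/* i40e_rx_pg_size(ring) / 2 on 4K pages */
	unsigned int headroom = 256;	/* assumed XDP_PACKET_HEADROOM-style offset */
	unsigned int shinfo = 320;	/* approx skb_shared_info size, assumed */

	/* 2048 - 256 - 320 = 1472 bytes of linear data in this model */
	printf("max linear payload: %u bytes\n", frame_sz - headroom - shinfo);
	return 0;
}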
/**
@@ -2333,25 +2359,6 @@ xdp_out:
}
/**
- * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region
- * @rx_ring: Rx ring
- * @rx_buffer: Rx buffer to adjust
- * @size: Size of adjustment
- **/
-static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
-{
- unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
-
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-}
-
-/**
* i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
* @xdp_ring: XDP Tx ring
*
@@ -2409,16 +2416,65 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
}
/**
- * i40e_inc_ntc: Advance the next_to_clean index
+ * i40e_inc_ntp - Advance the next_to_process index
* @rx_ring: Rx ring
**/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_inc_ntp(struct i40e_ring *rx_ring)
+{
+ u32 ntp = rx_ring->next_to_process + 1;
+
+ ntp = (ntp < rx_ring->count) ? ntp : 0;
+ rx_ring->next_to_process = ntp;
+ prefetch(I40E_RX_DESC(rx_ring, ntp));
+}
+
+/**
+ * i40e_add_xdp_frag - Add a frag to xdp_buff
+ * @xdp: xdp_buff pointing to the data
+ * @nr_frags: output; updated with the number of frags in the packet
+ * @rx_buffer: rx_buffer holding data of the current frag
+ * @size: size of data of current frag
+ */
+static int i40e_add_xdp_frag(struct xdp_buff *xdp, u32 *nr_frags,
+ struct i40e_rx_buffer *rx_buffer, u32 size)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(xdp);
+ } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
+ /* Overflowing packet: All frags need to be dropped */
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page,
+ rx_buffer->page_offset, size);
+
+ sinfo->xdp_frags_size += size;
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
+ if (page_is_pfmemalloc(rx_buffer->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+ *nr_frags = sinfo->nr_frags;
+
+ return 0;
+}
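
The -ENOMEM return above bounds a packet to MAX_SKB_FRAGS fragments; once the cap is hit, the whole packet is dropped at EOP. A toy model of that bounds check (the capacity constant is illustrative; MAX_SKB_FRAGS is config dependent):

#include <errno.h>
#include <stdio.h>

#define MAX_FRAGS 17	/* stand-in for MAX_SKB_FRAGS */

static int add_frag(unsigned int *nr_frags, unsigned int size,
		    unsigned int *total)
{
	if (*nr_frags >= MAX_FRAGS)
		return -ENOMEM;	/* overflowing packet: drop all frags */
	(*nr_frags)++;
	*total += size;
	return 0;
}

int main(void)
{
	unsigned int nr = 0, total = 0;

	for (int i = 0; i < 20; i++)
		if (add_frag(&nr, 3072, &total))
			printf("frag %d rejected (packet too long)\n", i);
	printf("%u frags, %u bytes\n", nr, total);
	return 0;
}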
+
+/**
+ * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @xdp: xdp_buff pointing to the data
+ * @rx_buffer: rx_buffer of eop desc
+ */
+static void i40e_consume_xdp_buff(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ i40e_process_rx_buffs(rx_ring, I40E_XDP_CONSUMED, xdp);
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
+ rx_ring->next_to_clean = rx_ring->next_to_process;
+ xdp->data = NULL;
}
/**
@@ -2437,38 +2493,36 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
unsigned int *rx_cleaned)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ u16 clean_threshold = rx_ring->count / 2;
unsigned int offset = rx_ring->rx_offset;
- struct sk_buff *skb = rx_ring->skb;
+ struct xdp_buff *xdp = &rx_ring->xdp;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false;
- struct xdp_buff xdp;
int xdp_res = 0;
-#if (PAGE_SIZE < 8192)
- frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
-#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
-
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
while (likely(total_rx_packets < (unsigned int)budget)) {
+ u16 ntp = rx_ring->next_to_process;
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
- int rx_buffer_pgcnt;
+ struct sk_buff *skb;
unsigned int size;
+ u32 nfrags = 0;
+ bool neop;
u64 qword;
/* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ if (cleaned_count >= clean_threshold) {
failure = failure ||
i40e_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = I40E_RX_DESC(rx_ring, ntp);
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
@@ -2487,8 +2541,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
- rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- i40e_inc_ntc(rx_ring);
+ rx_buffer = i40e_rx_bi(rx_ring, ntp);
+ i40e_inc_ntp(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
cleaned_count++;
continue;
@@ -2499,76 +2553,84 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
if (!size)
break;
- i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
-
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp);
/* retrieve a buffer from the ring */
- if (!skb) {
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+ neop = i40e_is_non_eop(rx_ring, rx_desc);
+ i40e_inc_ntp(rx_ring);
+
+ if (!xdp->data) {
unsigned char *hard_start;
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
- xdp_buff_clear_frags_flag(&xdp);
+ xdp_prepare_buff(xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+ xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
+ } else if (i40e_add_xdp_frag(xdp, &nfrags, rx_buffer, size) &&
+ !neop) {
+ /* Overflowing packet: Drop all frags on EOP */
+ i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
+ break;
}
+ if (neop)
+ continue;
+
+ xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog);
+
if (xdp_res) {
- if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
- xdp_xmit |= xdp_res;
- i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
+ xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ i40e_process_rx_buffs(rx_ring, xdp_res, xdp);
+ size = xdp_get_buff_len(xdp);
+ } else if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
} else {
rx_buffer->pagecnt_bias++;
}
total_rx_bytes += size;
- total_rx_packets++;
- } else if (skb) {
- i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
- } else if (ring_uses_build_skb(rx_ring)) {
- skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
} else {
- skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
- }
+ if (ring_uses_build_skb(rx_ring))
+ skb = i40e_build_skb(rx_ring, xdp, nfrags);
+ else
+ skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+
+ /* drop if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
+ break;
+ }
- /* exit if we failed to retrieve a buffer */
- if (!xdp_res && !skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- rx_buffer->pagecnt_bias++;
- break;
- }
+ if (i40e_cleanup_headers(rx_ring, skb, rx_desc))
+ goto process_next;
- i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
- cleaned_count++;
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
- i40e_inc_ntc(rx_ring);
- if (i40e_is_non_eop(rx_ring, rx_desc))
- continue;
+ /* populate checksum, VLAN, and protocol */
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
- if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
- skb = NULL;
- continue;
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* populate checksum, VLAN, and protocol */
- i40e_process_skb_fields(rx_ring, rx_desc, skb);
-
- i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
- skb = NULL;
-
/* update budget accounting */
total_rx_packets++;
+process_next:
+ cleaned_count += nfrags + 1;
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
+ rx_ring->next_to_clean = rx_ring->next_to_process;
+
+ xdp->data = NULL;
}
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
- rx_ring->skb = skb;
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
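
Summarizing the reworked receive flow: next_to_process advances on every descriptor, while next_to_clean only catches up once an EOP descriptor completes a packet, which is what lets a partially built xdp_buff survive across calls to the poll routine. A condensed userspace model over a fabricated descriptor stream:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool eop[] = { false, false, true, true, false, true }; /* 3 packets */
	unsigned int count = sizeof(eop) / sizeof(eop[0]);
	unsigned int ntc = 0, ntp = 0;

	while (ntp < count) {
		bool is_eop = eop[ntp++];	/* per-descriptor advance */

		if (!is_eop)
			continue;		/* keep accumulating frags */
		printf("packet spans [%u..%u)\n", ntc, ntp);
		ntc = ntp;			/* clean index catches up at EOP */
	}
	return 0;
}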
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 768290dc6f48..8c3d24012c54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -277,6 +277,7 @@ struct i40e_rx_buffer {
struct page *page;
__u32 page_offset;
__u16 pagecnt_bias;
+ __u32 page_count;
};
struct i40e_queue_stats {
@@ -336,6 +337,17 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* Storing the xdp_buff on the ring saves the state of a partially built
+ * packet when i40e_clean_rx_ring_irq() must return before it sees EOP,
+ * so packet building can resume for this ring in the next call to
+ * i40e_clean_rx_ring_irq().
+ */
+ struct xdp_buff xdp;
+
+ /* Next descriptor to be processed; next_to_clean is updated only on
+ * processing EOP descriptor
+ */
+ u16 next_to_process;
/* high bit set means dynamic, use accessor routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
* these values always store the USER setting, and must be converted
@@ -380,14 +392,6 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
- struct sk_buff *skb; /* When i40e_clean_rx_ring_irq() must
- * return before it sees the EOP for
- * the current packet, we save that skb
- * here and resume receiving this
- * packet the next time
- * i40e_clean_rx_ring_irq() is called
- * for this ring.
- */
struct i40e_channel *ch;
u16 rx_offset;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 232bc61d9eee..2cdce251472c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -6,7 +6,6 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index e809249500e1..aa32111afd6e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -20,7 +20,6 @@
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
-#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0d8b8c6f9bd3..a1f7c8edc22f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1393,6 +1393,8 @@ static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
wake_up(&pf->aq_wait_queue);
}
+#define ICE_MBX_OVERFLOW_WATERMARK 64
+
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
@@ -1483,6 +1485,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
return 0;
do {
+ struct ice_mbx_data data = {};
u16 opcode;
int ret;
@@ -1509,8 +1512,12 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
- if (!ice_is_malicious_vf(pf, &event, i, pending))
- ice_vc_process_vf_msg(pf, &event);
+ data.num_msg_proc = i;
+ data.num_pending_arq = pending;
+ data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
+ data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+
+ ice_vc_process_vf_msg(pf, &event, &data);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
@@ -3888,6 +3895,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
+ ice_mbx_init_snapshot(&pf->hw);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 0cc05e54a781..f1dca59bd844 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -204,10 +204,7 @@ void ice_free_vfs(struct ice_pf *pf)
}
/* clear malicious info since the VF is getting released */
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
- ICE_MAX_SRIOV_VFS, vf->vf_id))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
- vf->vf_id);
+ list_del(&vf->mbx_info.list_entry);
mutex_unlock(&vf->cfg_lock);
}
@@ -1017,7 +1014,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
if (!pci_vfs_assigned(pdev)) {
ice_free_vfs(pf);
- ice_mbx_deinit_snapshot(&pf->hw);
if (pf->lag)
ice_enable_lag(pf->lag);
return 0;
@@ -1027,15 +1023,9 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
return -EBUSY;
}
- err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (err)
- return err;
-
err = ice_pci_sriov_ena(pf, num_vfs);
- if (err) {
- ice_mbx_deinit_snapshot(&pf->hw);
+ if (err)
return err;
- }
if (pf->lag)
ice_disable_lag(pf->lag);
@@ -1787,66 +1777,3 @@ void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
}
}
}
-
-/**
- * ice_is_malicious_vf - helper function to detect a malicious VF
- * @pf: ptr to struct ice_pf
- * @event: pointer to the AQ event
- * @num_msg_proc: the number of messages processed so far
- * @num_msg_pending: the number of messages peinding in admin queue
- */
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending)
-{
- s16 vf_id = le16_to_cpu(event->desc.retval);
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_mbx_data mbxdata;
- bool malvf = false;
- struct ice_vf *vf;
- int status;
-
- vf = ice_get_vf_by_id(pf, vf_id);
- if (!vf)
- return false;
-
- if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- goto out_put_vf;
-
- mbxdata.num_msg_proc = num_msg_proc;
- mbxdata.num_pending_arq = num_msg_pending;
- mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
-#define ICE_MBX_OVERFLOW_WATERMARK 64
- mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
-
- /* check to see if we have a malicious VF */
- status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
- if (status)
- goto out_put_vf;
-
- if (malvf) {
- bool report_vf = false;
-
- /* if the VF is malicious and we haven't let the user
- * know about it, then let them know now
- */
- status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
- ICE_MAX_SRIOV_VFS, vf_id,
- &report_vf);
- if (status)
- dev_dbg(dev, "Error reporting malicious VF\n");
-
- if (report_vf) {
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (pf_vsi)
- dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr[0],
- pf_vsi->netdev->dev_addr);
- }
- }
-
-out_put_vf:
- ice_put_vf(vf);
- return malvf;
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 955ab810a198..346cb2666f3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -33,11 +33,7 @@ int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -68,22 +64,11 @@ ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
static inline
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
-static inline bool
-ice_is_malicious_vf(struct ice_pf __always_unused *pf,
- struct ice_rq_event_info __always_unused *event,
- u16 __always_unused num_msg_proc,
- u16 __always_unused num_msg_pending)
-{
- return false;
-}
-
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
int __always_unused num_vfs)
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index e3f622cad425..a09556e57803 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -784,14 +784,15 @@ struct ice_mbx_snap_buffer_data {
u16 max_num_msgs_mbx;
};
-/* Structure to track messages sent by VFs on mailbox:
- * 1. vf_cntr: a counter array of VFs to track the number of
- * asynchronous messages sent by each VF
- * 2. vfcntr_len: number of entries in VF counter array
+/* Structure used to track a single VF's messages on the mailbox:
+ * 1. list_entry: linked list entry node
+ * 2. msg_count: the number of asynchronous messages sent by this VF
+ * 3. malicious: whether this VF has been detected as malicious before
*/
-struct ice_mbx_vf_counter {
- u32 *vf_cntr;
- u32 vfcntr_len;
+struct ice_mbx_vf_info {
+ struct list_head list_entry;
+ u32 msg_count;
+ u8 malicious : 1;
};
/* Structure to hold data relevant to the captured static snapshot
@@ -799,7 +800,7 @@ struct ice_mbx_vf_counter {
*/
struct ice_mbx_snapshot {
struct ice_mbx_snap_buffer_data mbx_buf;
- struct ice_mbx_vf_counter mbx_vf;
+ struct list_head mbx_vf;
};
/* Structure to hold data to be used for capturing or updating a
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 0e57bd1b85fd..89fd6982df09 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -496,10 +496,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
/* clear all malicious info if the VFs are getting reset */
ice_for_each_vf(pf, bkt, vf)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
- ICE_MAX_SRIOV_VFS, vf->vf_id))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
- vf->vf_id);
+ ice_mbx_clear_malvf(&vf->mbx_info);
/* If VFs have been disabled, there is no need to reset */
if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
@@ -601,12 +598,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
struct device *dev;
- struct ice_hw *hw;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
- hw = &pf->hw;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -705,10 +700,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_eswitch_replay_vf_mac_rule(vf);
/* if the VF has been reset allow it to come up again */
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
- ICE_MAX_SRIOV_VFS, vf->vf_id))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
- vf->vf_id);
+ ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
if (flags & ICE_VF_RESET_LOCK)
@@ -764,6 +756,9 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
ice_vf_ctrl_invalidate_vsi(vf);
ice_vf_fdir_init(vf);
+ /* Initialize mailbox info for this VF */
+ ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
+
mutex_init(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index ef30f05b5d02..e3cda6fb71ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -74,7 +74,6 @@ struct ice_vfs {
u16 num_qps_per; /* number of queue pairs per VF */
u16 num_msix_per; /* number of MSI-X vectors per VF */
unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
- DECLARE_BITMAP(malvfs, ICE_MAX_SRIOV_VFS); /* malicious VF indicator */
};
/* VF information structure */
@@ -105,6 +104,7 @@ struct ice_vf {
DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */
struct virtchnl_vlan_caps vlan_v2_caps;
+ struct ice_mbx_vf_info mbx_info;
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
u8 spoofchk:1;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index f56fa94ff3d0..40cb4ba0789c 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -93,36 +93,31 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
*
* 2. When the caller starts processing its mailbox queue in response to an
* interrupt, the structure ice_mbx_snapshot is expected to be cleared before
- * the algorithm can be run for the first time for that interrupt. This can be
- * done via ice_mbx_reset_snapshot().
+ * the algorithm can be run for the first time for that interrupt. This
+ * requires calling ice_mbx_reset_snapshot() as well as calling
+ * ice_mbx_reset_vf_info() for each VF tracking structure.
*
* 3. For every message read by the caller from the MBX Queue, the caller must
* call the detection algorithm's entry function ice_mbx_vf_state_handler().
* Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
* filled as it is required to be passed to the algorithm.
*
- * 4. Every time a message is read from the MBX queue, a VFId is received which
- * is passed to the state handler. The boolean output is_malvf of the state
- * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
- * whether this VF is malicious or not.
+ * 4. Every time a message is read from the MBX queue, a tracking structure
+ * for the VF must be passed to the state handler. The boolean output
+ * report_malvf from ice_mbx_vf_state_handler() serves as an indicator to the
+ * caller whether it must report this VF as malicious or not.
*
* 5. When a VF is identified to be malicious, the caller can send a message
- * to the system administrator. The caller can invoke ice_mbx_report_malvf()
- * to help determine if a malicious VF is to be reported or not. This function
- * requires the caller to maintain a global bitmap to track all malicious VFs
- * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
- * to be malicious by ice_mbx_vf_state_handler().
+ * to the system administrator.
*
- * 6. The global bitmap maintained by PF can be cleared completely if PF is in
- * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
- * When a VF is shut down and brought back up, we assume that the new VF
- * brought up is not malicious and hence report it if found malicious.
+ * 6. The PF is responsible for maintaining a struct ice_mbx_vf_info for
+ * each VF, and should clear that tracking structure whenever the VF is
+ * reset. When a VF is shut down and brought back up, we assume the new
+ * VF is not malicious and will report it again if misbehavior is
+ * detected again.
*
* 7. The function ice_mbx_reset_snapshot() is called to reset the information
* in ice_mbx_snapshot for every new mailbox interrupt handled.
- *
- * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
- * when driver is unloaded.
*/
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
@@ -131,6 +126,25 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to the mailbox snapshot
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ struct ice_mbx_vf_info *vf_info;
+
+ /* Clear mbx_buf in the mailbox snapshot structure and set the
+ * mailbox snapshot state to a new capture.
+ */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ /* Reset message counts for all VFs to zero */
+ list_for_each_entry(vf_info, &snap->mbx_vf, list_entry)
+ vf_info->msg_count = 0;
+}
+
+/**
* ice_mbx_traverse - Pass through mailbox snapshot
* @hw: pointer to the HW struct
* @new_state: new algorithm state
@@ -171,7 +185,7 @@ ice_mbx_traverse(struct ice_hw *hw,
/**
* ice_mbx_detect_malvf - Detect malicious VF in snapshot
* @hw: pointer to the HW struct
- * @vf_id: relative virtual function ID
+ * @vf_info: mailbox tracking structure for a VF
* @new_state: new algorithm state
* @is_malvf: boolean output to indicate if VF is malicious
*
@@ -180,19 +194,14 @@ ice_mbx_traverse(struct ice_hw *hw,
* the permissible number of messages to send.
*/
static int
-ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
+ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
enum ice_mbx_snapshot_state *new_state,
bool *is_malvf)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
-
- if (vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
-
- /* increment the message count in the VF array */
- snap->mbx_vf.vf_cntr[vf_id]++;
+ /* increment the message count for this VF */
+ vf_info->msg_count++;
- if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ if (vf_info->msg_count >= ICE_ASYNC_VF_MSG_THRESHOLD)
*is_malvf = true;
/* continue to iterate through the mailbox snapshot */
@@ -202,35 +211,11 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
}
/**
- * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
- * @snap: pointer to mailbox snapshot structure in the ice_hw struct
- *
- * Reset the mailbox snapshot structure and clear VF counter array.
- */
-static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
-{
- u32 vfcntr_len;
-
- if (!snap || !snap->mbx_vf.vf_cntr)
- return;
-
- /* Clear VF counters. */
- vfcntr_len = snap->mbx_vf.vfcntr_len;
- if (vfcntr_len)
- memset(snap->mbx_vf.vf_cntr, 0,
- (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
-
- /* Reset mailbox snapshot for a new capture. */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
-}
-
-/**
* ice_mbx_vf_state_handler - Handle states of the overflow algorithm
* @hw: pointer to the HW struct
* @mbx_data: pointer to structure containing mailbox data
- * @vf_id: relative virtual function (VF) ID
- * @is_malvf: boolean output to indicate if VF is malicious
+ * @vf_info: mailbox tracking structure for the VF in question
+ * @report_malvf: boolean output to indicate whether VF should be reported
*
* The function serves as an entry point for the malicious VF
* detection algorithm by handling the different states and state
@@ -249,24 +234,24 @@ static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
* the static snapshot and look for a malicious VF.
*/
int
-ice_mbx_vf_state_handler(struct ice_hw *hw,
- struct ice_mbx_data *mbx_data, u16 vf_id,
- bool *is_malvf)
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ struct ice_mbx_vf_info *vf_info, bool *report_malvf)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
struct ice_mbx_snap_buffer_data *snap_buf;
struct ice_ctl_q_info *cq = &hw->mailboxq;
enum ice_mbx_snapshot_state new_state;
+ bool is_malvf = false;
int status = 0;
- if (!is_malvf || !mbx_data)
+ if (!report_malvf || !mbx_data || !vf_info)
return -EINVAL;
+ *report_malvf = false;
+
/* When entering the mailbox state machine assume that the VF
* is not malicious until detected.
*/
- *is_malvf = false;
-
/* Checking if max messages allowed to be processed while servicing current
* interrupt is not less than the defined AVF message threshold.
*/
@@ -315,7 +300,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
if (snap_buf->num_pending_arq >=
mbx_data->async_watermark_val) {
new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
} else {
new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
ice_mbx_traverse(hw, &new_state);
@@ -329,7 +314,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
case ICE_MAL_VF_DETECT_STATE_DETECT:
new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
break;
default:
@@ -339,145 +324,57 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
snap_buf->state = new_state;
- return status;
-}
-
-/**
- * ice_mbx_report_malvf - Track and note malicious VF
- * @hw: pointer to the HW struct
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
- * @report_malvf: boolean to indicate if malicious VF must be reported
- *
- * This function will update a bitmap that keeps track of the malicious
- * VFs attached to the PF. A malicious VF must be reported only once if
- * discovered between VF resets or loading so the function checks
- * the input vf_id against the bitmap to verify if the VF has been
- * detected in any previous mailbox iterations.
- */
-int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf)
-{
- if (!all_malvfs || !report_malvf)
- return -EINVAL;
-
- *report_malvf = false;
-
- if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
- return -EINVAL;
-
- if (vf_id >= bitmap_len)
- return -EIO;
-
- /* If the vf_id is found in the bitmap set bit and boolean to true */
- if (!test_and_set_bit(vf_id, all_malvfs))
+ /* Only report VFs as malicious the first time we detect it */
+ if (is_malvf && !vf_info->malicious) {
+ vf_info->malicious = 1;
*report_malvf = true;
+ }
- return 0;
+ return status;
}
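
The reworked handler folds the old ice_mbx_report_malvf() bookkeeping into the per-VF tracking info: a VF's message count grows while the receive queue sits above the watermark, and the VF is reported at most once, on first crossing the threshold. A reduced model of that accounting, assuming ICE_ASYNC_VF_MSG_THRESHOLD is 63 (an assumption worth checking against ice_vf_mbx.h):

#include <stdbool.h>
#include <stdio.h>

#define MSG_THRESHOLD 63	/* assumed value of ICE_ASYNC_VF_MSG_THRESHOLD */

struct vf_info {
	unsigned int msg_count;
	bool malicious;
};

/* Returns true exactly once, on the first crossing of the threshold */
static bool handle_msg(struct vf_info *vf)
{
	bool report = false;

	if (++vf->msg_count >= MSG_THRESHOLD && !vf->malicious) {
		vf->malicious = true;
		report = true;
	}
	return report;
}

int main(void)
{
	struct vf_info vf = { 0 };

	for (int i = 0; i < 100; i++)
		if (handle_msg(&vf))
			printf("report VF after message %d\n", i + 1); /* once, at 63 */
	return 0;
}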
/**
- * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
- * @snap: pointer to the mailbox snapshot structure
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
+ * ice_mbx_clear_malvf - Clear VF mailbox info
+ * @vf_info: the mailbox tracking structure for a VF
*
- * In case of a VF reset, this function can be called to clear
- * the bit corresponding to the VF ID in the bitmap tracking all
- * malicious VFs attached to the PF. The function also clears the
- * VF counter array at the index of the VF ID. This is to ensure
- * that the new VF loaded is not considered malicious before going
- * through the overflow detection algorithm.
+ * In case of a VF reset, this function shall be called to clear the VF's
+ * current mailbox tracking state.
*/
-int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id)
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info)
{
- if (!snap || !all_malvfs)
- return -EINVAL;
-
- if (bitmap_len < snap->mbx_vf.vfcntr_len)
- return -EINVAL;
-
- /* Ensure VF ID value is not larger than bitmap or VF counter length */
- if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return -EIO;
-
- /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
- clear_bit(vf_id, all_malvfs);
-
- /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
- * This is to ensure that if a VF is unloaded and a new one brought back
- * up with the same VF ID for a snapshot currently in traversal or detect
- * state the counter for that VF ID does not increment on top of existing
- * values in the mailbox overflow detection algorithm.
- */
- snap->mbx_vf.vf_cntr[vf_id] = 0;
-
- return 0;
+ vf_info->malicious = 0;
+ vf_info->msg_count = 0;
}
/**
- * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
+ * ice_mbx_init_vf_info - Initialize a new VF mailbox tracking info
* @hw: pointer to the hardware structure
- * @vf_count: number of VFs allocated on a PF
+ * @vf_info: the mailbox tracking info structure for a VF
*
- * Clear the mailbox snapshot structure and allocate memory
- * for the VF counter array based on the number of VFs allocated
- * on that PF.
+ * Initialize a VF mailbox tracking info structure and insert it into the
+ * snapshot list.
*
- * Assumption: This function will assume ice_get_caps() has already been
- * called to ensure that the vf_count can be compared against the number
- * of VFs supported as defined in the functional capabilities of the device.
+ * When a VF is removed, its tracking structure must also be deleted from
+ * the linked list.
*/
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
- /* Ensure that the number of VFs allocated is non-zero and
- * is not greater than the number of supported VFs defined in
- * the functional capabilities of the PF.
- */
- if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return -EINVAL;
-
- snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
- sizeof(*snap->mbx_vf.vf_cntr),
- GFP_KERNEL);
- if (!snap->mbx_vf.vf_cntr)
- return -ENOMEM;
-
- /* Setting the VF counter length to the number of allocated
- * VFs for given PF's functional capabilities.
- */
- snap->mbx_vf.vfcntr_len = vf_count;
-
- /* Clear mbx_buf in the mailbox snaphot structure and setting the
- * mailbox snapshot state to a new capture.
- */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
-
- return 0;
+ ice_mbx_clear_malvf(vf_info);
+ list_add(&vf_info->list_entry, &snap->mbx_vf);
}
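
The per-VF lifecycle with the list-based snapshot is: INIT_LIST_HEAD() once at PF init, ice_mbx_init_vf_info() when a VF entry is created, ice_mbx_clear_malvf() on VF reset, and list_del() when the VF is freed. A minimal userspace analogue of that lifecycle, using a toy singly linked list in place of <linux/list.h>:

#include <stdio.h>

struct vf_info {
	struct vf_info *next;
	unsigned int msg_count;
};

static struct vf_info *snapshot_list;	/* stands in for ice_mbx_snapshot.mbx_vf */

static void init_vf_info(struct vf_info *vf)	/* ~ice_mbx_init_vf_info() */
{
	vf->msg_count = 0;
	vf->next = snapshot_list;
	snapshot_list = vf;
}

static void reset_snapshot(void)		/* ~ice_mbx_reset_snapshot() */
{
	for (struct vf_info *vf = snapshot_list; vf; vf = vf->next)
		vf->msg_count = 0;
}

int main(void)
{
	struct vf_info a, b;

	init_vf_info(&a);
	init_vf_info(&b);
	a.msg_count = 40;	/* VF "a" has been chattering */
	reset_snapshot();	/* new mailbox interrupt: counters restart */
	printf("%u %u\n", a.msg_count, b.msg_count);	/* prints: 0 0 */
	return 0;
}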
/**
- * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot data
* @hw: pointer to the hardware structure
*
- * Clear the mailbox snapshot structure and free the VF counter array.
+ * Clear the mailbox snapshot structure and initialize the VF mailbox list.
*/
-void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+void ice_mbx_init_snapshot(struct ice_hw *hw)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
- /* Free VF counter array and reset VF counter length */
- devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
- snap->mbx_vf.vfcntr_len = 0;
-
- /* Clear mbx_buf in the mailbox snaphot structure */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ INIT_LIST_HEAD(&snap->mbx_vf);
+ ice_mbx_reset_snapshot(snap);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
index 582716e6d5f9..44bc030d17e0 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -21,15 +21,10 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
- u16 vf_id, bool *is_mal_vf);
-int
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id);
-int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
-void ice_mbx_deinit_snapshot(struct ice_hw *hw);
-int
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf);
+ struct ice_mbx_vf_info *vf_info, bool *report_malvf);
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_snapshot(struct ice_hw *hw);
#else /* CONFIG_PCI_IOV */
static inline int
ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
@@ -48,5 +43,9 @@ ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
return 0;
}
+static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
+{
+}
+
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VF_MBX_H_ */
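The empty static inline added for the !CONFIG_PCI_IOV case follows the usual kernel stub pattern: callers invoke the helper unconditionally and the compiler elides it when SR-IOV support is compiled out. A trivial illustration (example_pf_init is hypothetical):

	static void example_pf_init(struct ice_hw *hw)
	{
		/* real list init with CONFIG_PCI_IOV=y, a no-op otherwise;
		 * no #ifdef needed at the call site
		 */
		ice_mbx_init_snapshot(hw);
	}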
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index e24e3f5017ca..97243c616d5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3834,14 +3834,57 @@ void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
}
/**
+ * ice_is_malicious_vf - check whether this VF might be overflowing the mailbox
+ * @vf: the VF to check
+ * @mbxdata: data about the state of the mailbox
+ *
+ * Detect if a given VF might be malicious and attempting to overflow the PF
+ * mailbox. If so, log a warning message and ignore this event.
+ */
+static bool
+ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
+{
+ bool report_malvf = false;
+ struct device *dev;
+ struct ice_pf *pf;
+ int status;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return vf->mbx_info.malicious;
+
+ /* check to see if we have a newly malicious VF */
+ status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info,
+ &report_malvf);
+ if (status)
+ dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n",
+ vf->vf_id, vf->dev_lan_addr, status);
+
+ if (report_malvf) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+ u8 zero_addr[ETH_ALEN] = {};
+
+ dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
+ vf->dev_lan_addr,
+ pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr);
+ }
+
+ return vf->mbx_info.malicious;
+}
+
+/**
* ice_vc_process_vf_msg - Process request from VF
* @pf: pointer to the PF structure
* @event: pointer to the AQ event
+ * @mbxdata: information used to detect VF attempting mailbox overflow
*
* Called from the common ASQ/ARQ handler to process a request from a VF.
*/
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata)
{
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
@@ -3863,6 +3906,10 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
mutex_lock(&vf->cfg_lock);
+ /* Check if the VF is trying to overflow the mailbox */
+ if (ice_is_malicious_vf(vf, mbxdata))
+ goto finish;
+
/* Check if VF is disabled. */
if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
err = -EPERM;
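With this change the control-queue handler must snapshot mailbox occupancy per received event and pass it down. A hedged sketch of the calling side, assuming the ice_mbx_data field names used elsewhere in this series (num_msg_proc, num_pending_arq, max_num_msgs_mbx); the local variable names here are illustrative:

	struct ice_mbx_data mbxdata;

	/* occupancy snapshot taken while draining the PF mailbox ARQ */
	mbxdata.num_msg_proc = msgs_handled_this_pass;
	mbxdata.num_pending_arq = msgs_still_queued;
	mbxdata.max_num_msgs_mbx = hw->aq.num_rq_entries;

	/* processing bails out early if the VF is flagged as malicious */
	ice_vc_process_vf_msg(pf, &event, &mbxdata);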
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index b454654d7b0c..cd747718de73 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -63,6 +63,8 @@ int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata);
#else /* CONFIG_PCI_IOV */
static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
@@ -81,6 +83,12 @@ static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
return false;
}
+
+static inline void
+ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata)
+{
+}
#endif /* !CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 274c781b5547..58872a4c2540 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -28,7 +28,6 @@
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
-#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 6f471b91f562..405886ee5261 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -67,6 +67,7 @@
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
+#define IGB_82580_BASE_PERIOD 0x800000000
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
static void igb_ptp_sdp_init(struct igb_adapter *adapter);
@@ -209,17 +210,11 @@ static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm)
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
struct e1000_hw *hw = &igb->hw;
- int neg_adj = 0;
+ bool neg_adj;
u64 rate;
u32 inca;
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
- rate = scaled_ppm;
- rate <<= 13;
- rate = div_u64(rate, 15625);
+ neg_adj = diff_by_scaled_ppm(IGB_82580_BASE_PERIOD, scaled_ppm, &rate);
inca = rate & INCVALUE_MASK;
if (neg_adj)
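The conversion is unchanged arithmetically; the helper just centralizes it. Assuming diff_by_scaled_ppm()'s usual definition in <linux/ptp_clock_kernel.h> (rate = base * |scaled_ppm| / (10^6 * 2^16), returning true for a negative adjustment), the two forms agree:

	old:  rate = (scaled_ppm << 13) / 15625
	           = scaled_ppm * 2^13 / 15625
	           = scaled_ppm * 2^19 / 10^6       (scale num/den by 64)

	new:  rate = IGB_82580_BASE_PERIOD * scaled_ppm / (10^6 * 2^16)
	           = scaled_ppm * 2^35 / (2^16 * 10^6)
	           = scaled_ppm * 2^19 / 10^6

so picking the base period as 2^35 (0x800000000) reproduces the old scaling exactly, and the returned bool replaces the hand-rolled neg_adj bookkeeping.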
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 72cb1b56e9f2..7ff2752dd763 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2593,6 +2593,33 @@ static void igbvf_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
+/**
+ * igbvf_io_prepare - prepare device driver for PCI reset
+ * @pdev: PCI device information struct
+ */
+static void igbvf_io_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ igbvf_down(adapter);
+}
+
+/**
+ * igbvf_io_reset_done - PCI reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void igbvf_io_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ igbvf_up(adapter);
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+}
+
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -2920,6 +2947,8 @@ static const struct pci_error_handlers igbvf_err_handler = {
.error_detected = igbvf_io_error_detected,
.slot_reset = igbvf_io_slot_reset,
.resume = igbvf_io_resume,
+ .reset_prepare = igbvf_io_prepare,
+ .reset_done = igbvf_io_reset_done,
};
static const struct pci_device_id igbvf_pci_tbl[] = {
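The two new hooks wire igbvf into the PCI core's function-reset flow: reset_prepare runs before the device is reset and reset_done after, so the driver quiesces traffic around the reset rather than discovering it afterwards. A hedged sketch of the ordering as seen from the driver (the trigger shown is one example; any pci_reset_function() path applies):

	/*
	 * pci_reset_function(pdev)
	 *   -> .reset_prepare = igbvf_io_prepare():
	 *        take __IGBVF_RESETTING, igbvf_down()
	 *   -> function-level reset of the VF
	 *   -> .reset_done = igbvf_io_reset_done():
	 *        igbvf_up(), clear __IGBVF_RESETTING
	 */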
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index df3e26c0cf01..34aebf00a512 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -99,6 +99,7 @@ struct igc_ring {
u32 start_time;
u32 end_time;
+ u32 max_sdu;
/* CBS parameters */
bool cbs_enable; /* indicates if CBS is enabled */
@@ -185,6 +186,7 @@ struct igc_adapter {
ktime_t base_time;
ktime_t cycle_time;
bool qbv_enable;
+ u32 qbv_config_change_errors;
/* OS defined structs */
struct pci_dev *pdev;
@@ -292,8 +294,6 @@ extern char igc_driver_name[];
#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
-#define IGC_FLAG_MEDIA_RESET BIT(10)
-#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_EEE BIT(14)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 9dec3563ce3a..44a507029946 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -662,9 +662,6 @@
*/
#define IGC_TW_SYSTEM_100_MASK 0x0000FF00
#define IGC_TW_SYSTEM_100_SHIFT 8
-#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
-#define IGC_DMACR_DMACTHR_MASK 0x00FF0000
-#define IGC_DMACR_DMACTHR_SHIFT 16
/* Reg val to set scale to 1024 nsec */
#define IGC_LTRMINV_SCALE_1024 2
/* Reg val to set scale to 32768 nsec */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 5a26a7805ef8..0e2cb00622d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -67,6 +67,7 @@ static const struct igc_stats igc_gstrings_stats[] = {
IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
IGC_STAT("tx_lpi_counter", stats.tlpic),
IGC_STAT("rx_lpi_counter", stats.rlpic),
+ IGC_STAT("qbv_config_change_errors", qbv_config_change_errors),
};
#define IGC_NETDEV_STAT(_net_stat) { \
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 88680e3d613d..e1c572e0d4ef 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -273,6 +273,7 @@ struct igc_hw_stats {
u64 o2bspc;
u64 b2ospc;
u64 b2ogprc;
+ u64 txdrop;
};
struct net_device *igc_get_hw_dev(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 59d5c467ea6e..17546a035ab1 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -593,20 +593,11 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
size = rd32(IGC_RXPBS) &
IGC_RXPBS_SIZE_I225_MASK;
- /* Calculations vary based on DMAC settings. */
- if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
- size -= (rd32(IGC_DMACR) &
- IGC_DMACR_DMACTHR_MASK) >>
- IGC_DMACR_DMACTHR_SHIFT;
- /* Convert size to bits. */
- size *= 1024 * 8;
- } else {
- /* Convert size to bytes, subtract the MTU, and then
- * convert the size to bits.
- */
- size *= 1024;
- size *= 8;
- }
+ /* Convert size to bytes, subtract the MTU, and then
+ * convert the size to bits.
+ */
+ size *= 1024;
+ size *= 8;
if (size < 0) {
hw_dbg("Invalid effective Rx buffer size %d\n",
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 25fc6c65209b..ba49728be919 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -4,7 +4,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
-#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
@@ -1501,6 +1500,7 @@ static int igc_tso(struct igc_ring *tx_ring,
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
bool first_flag = false, insert_empty = false;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
@@ -1563,9 +1563,19 @@ done:
first->bytecount = skb->len;
first->gso_segs = 1;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+ if (tx_ring->max_sdu > 0) {
+ u32 max_sdu = 0;
+
+ max_sdu = tx_ring->max_sdu +
+ (skb_vlan_tagged(first->skb) ? VLAN_HLEN : 0);
+
+ if (first->bytecount > max_sdu) {
+ adapter->stats.txdrop++;
+ goto out_drop;
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
/* FIXME: add support for retrieving timestamps from
* the other timer registers before skipping the
* timestamping request.
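The new drop path enforces taprio's per-queue queueMaxSDU in software: ring->max_sdu already includes the L2 header (see the setup loop in igc_save_qbv_schedule below), and tagged frames get VLAN_HLEN of extra slack. A worked example, assuming the usual 14-byte Ethernet header:

	queue-max-sdu = 1500, dev->hard_header_len = 14
	ring->max_sdu               = 1500 + 14 = 1514 bytes
	limit for a VLAN-tagged skb = 1514 + 4  = 1518 bytes

Any skb whose total bytecount exceeds the limit increments stats.txdrop (now reported as tx_dropped, per the igc_update_stats hunk below) and is dropped instead of transmitted.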
@@ -4920,7 +4930,8 @@ void igc_update_stats(struct igc_adapter *adapter)
net_stats->tx_window_errors = adapter->stats.latecol;
net_stats->tx_carrier_errors = adapter->stats.tncrs;
- /* Tx Dropped needs to be maintained elsewhere */
+ /* Tx Dropped */
+ net_stats->tx_dropped = adapter->stats.txdrop;
/* Management Stats */
adapter->stats.mgptc += rd32(IGC_MGTPTC);
@@ -5566,25 +5577,8 @@ no_wait:
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- /* link is down, time to check for alternate media */
- if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
- if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
- schedule_work(&adapter->reset_task);
- /* return immediately */
- return;
- }
- }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
-
- /* also check for alternate media here */
- } else if (!netif_carrier_ok(netdev) &&
- (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
- if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
- schedule_work(&adapter->reset_task);
- /* return immediately */
- return;
- }
}
}
@@ -6049,12 +6043,14 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
adapter->base_time = 0;
adapter->cycle_time = NSEC_PER_SEC;
+ adapter->qbv_config_change_errors = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
+ ring->max_sdu = 0;
}
return 0;
@@ -6138,6 +6134,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
}
}
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+ struct net_device *dev = adapter->netdev;
+
+ if (qopt->max_sdu[i])
+ ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len;
+ else
+ ring->max_sdu = 0;
+ }
+
return 0;
}
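A ring whose max_sdu stays zero is unrestricted, so only queues the user actually constrains pay the extra check in the transmit hot path. An illustrative mapping, again assuming a 14-byte Ethernet header:

	/* illustrative only, hard_header_len = 14 */
	qopt->max_sdu[] = { 1500, 0, 200, 0 }
	ring->max_sdu[] = { 1514, 0, 214, 0 }   /* 0 == no limit */

Queues 1 and 3 accept any frame, while queues 0 and 2 enforce 1514- and 214-byte ceilings respectively (plus VLAN_HLEN for tagged traffic).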
@@ -6236,8 +6242,10 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
caps->broken_mqprio = true;
- if (hw->mac.type == igc_i225)
+ if (hw->mac.type == igc_i225) {
+ caps->supports_queue_max_sdu = true;
caps->gate_mask_per_txq = true;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 01c86d36856d..dba5a5759b1c 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -292,7 +292,6 @@
/* LTR registers */
#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
-#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */
#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index a386c8d61dbf..94a2b0dfb54d 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -114,6 +114,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ bool tsn_mode_reconfig = false;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
@@ -226,6 +227,10 @@ skip_cbs:
}
tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
+
+ if (tqavctrl & IGC_TQAVCTRL_TRANSMIT_MODE_TSN)
+ tsn_mode_reconfig = true;
+
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
cycle = adapter->cycle_time;
@@ -239,6 +244,13 @@ skip_cbs:
s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+
+ /* Increase the counter if scheduling into the past while
+ * Gate Control List (GCL) is running.
+ */
+ if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
+ tsn_mode_reconfig)
+ adapter->qbv_config_change_errors++;
} else {
/* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
* has to be configured before the cycle time and base time.
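The new counter approximates the 802.1Q ConfigChangeError statistic: it increments only when the controller was already in TSN transmit mode (tsn_mode_reconfig) and a gate control list is live (non-zero BASET registers), i.e. an admin base time landed in the past during a reconfiguration. A worked example of the rollover logic above, using the shown div64_s64/ktime math:

	base_time = 0, cycle = 1 s, systim = 2.3 s
	n = floor((systim - base_time) / cycle) = 2
	base_time += (n + 1) * cycle  ->  3 s   /* next future cycle start */
	/* GCL already running + TSN mode set -> qbv_config_change_errors++ */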
diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile
deleted file mode 100644
index 2433e9300a33..000000000000
--- a/drivers/net/ethernet/intel/ixgb/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2008 Intel Corporation.
-#
-# Makefile for the Intel(R) PRO/10GbE ethernet driver
-#
-
-obj-$(CONFIG_IXGB) += ixgb.o
-
-ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
deleted file mode 100644
index 81ac39576803..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_H_
-#define _IXGB_H_
-
-#include <linux/stddef.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/capability.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <net/pkt_sched.h>
-#include <linux/list.h>
-#include <linux/reboot.h>
-#include <net/checksum.h>
-
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-
-#define BAR_0 0
-#define BAR_1 1
-
-struct ixgb_adapter;
-#include "ixgb_hw.h"
-#include "ixgb_ee.h"
-#include "ixgb_ids.h"
-
-/* TX/RX descriptor defines */
-#define DEFAULT_TXD 256
-#define MAX_TXD 4096
-#define MIN_TXD 64
-
-/* hardware cannot reliably support more than 512 descriptors owned by
- * hardware descriptor cache otherwise an unreliable ring under heavy
- * receive load may result */
-#define DEFAULT_RXD 512
-#define MAX_RXD 512
-#define MIN_RXD 64
-
-/* Supported Rx Buffer Sizes */
-#define IXGB_RXBUFFER_2048 2048
-#define IXGB_RXBUFFER_4096 4096
-#define IXGB_RXBUFFER_8192 8192
-#define IXGB_RXBUFFER_16384 16384
-
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
-struct ixgb_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
- u16 length;
- u16 next_to_watch;
- u16 mapped_as_page;
-};
-
-struct ixgb_desc_ring {
- /* pointer to the descriptor ring memory */
- void *desc;
- /* physical address of the descriptor ring */
- dma_addr_t dma;
- /* length of descriptor ring in bytes */
- unsigned int size;
- /* number of descriptors in the ring */
- unsigned int count;
- /* next descriptor to associate a buffer with */
- unsigned int next_to_use;
- /* next descriptor to check for DD status bit */
- unsigned int next_to_clean;
- /* array of buffer information structs */
- struct ixgb_buffer *buffer_info;
-};
-
-#define IXGB_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
-
-#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
-#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc)
-#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc)
-#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc)
-
-/* board specific private data structure */
-
-struct ixgb_adapter {
- struct timer_list watchdog_timer;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 bd_number;
- u32 rx_buffer_len;
- u32 part_num;
- u16 link_speed;
- u16 link_duplex;
- struct work_struct tx_timeout_task;
-
- /* TX */
- struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
- unsigned int restart_queue;
- unsigned long timeo_start;
- u32 tx_cmd_type;
- u64 hw_csum_tx_good;
- u64 hw_csum_tx_error;
- u32 tx_int_delay;
- u32 tx_timeout_count;
- bool tx_int_delay_enable;
- bool detect_tx_hung;
-
- /* RX */
- struct ixgb_desc_ring rx_ring;
- u64 hw_csum_rx_error;
- u64 hw_csum_rx_good;
- u32 rx_int_delay;
- bool rx_csum;
-
- /* OS defined structs */
- struct napi_struct napi;
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in ixgb_hw.h */
- struct ixgb_hw hw;
- u16 msg_enable;
- struct ixgb_hw_stats stats;
- u32 alloc_rx_buff_failed;
- bool have_msi;
- unsigned long flags;
-};
-
-enum ixgb_state_t {
- /* TBD
- __IXGB_TESTING,
- __IXGB_RESETTING,
- */
- __IXGB_DOWN
-};
-
-/* Exported from other modules */
-void ixgb_check_options(struct ixgb_adapter *adapter);
-void ixgb_set_ethtool_ops(struct net_device *netdev);
-extern char ixgb_driver_name[];
-
-void ixgb_set_speed_duplex(struct net_device *netdev);
-
-int ixgb_up(struct ixgb_adapter *adapter);
-void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-void ixgb_reset(struct ixgb_adapter *adapter);
-int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-void ixgb_update_stats(struct ixgb_adapter *adapter);
-
-
-#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
deleted file mode 100644
index 129286fc1634..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
+++ /dev/null
@@ -1,580 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "ixgb_hw.h"
-#include "ixgb_ee.h"
-/* Local prototypes */
-static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
-
-static void ixgb_shift_out_bits(struct ixgb_hw *hw,
- u16 data,
- u16 count);
-static void ixgb_standby_eeprom(struct ixgb_hw *hw);
-
-static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
-
-static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
-
-/******************************************************************************
- * Raises the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd_reg - EECD's current value
- *****************************************************************************/
-static void
-ixgb_raise_clock(struct ixgb_hw *hw,
- u32 *eecd_reg)
-{
- /* Raise the clock input to the EEPROM (by setting the SK bit), and then
- * wait 50 microseconds.
- */
- *eecd_reg = *eecd_reg | IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, *eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Lowers the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd_reg - EECD's current value
- *****************************************************************************/
-static void
-ixgb_lower_clock(struct ixgb_hw *hw,
- u32 *eecd_reg)
-{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
- */
- *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, *eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Shift data bits out to the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * data - data to send to the EEPROM
- * count - number of bits to shift out
- *****************************************************************************/
-static void
-ixgb_shift_out_bits(struct ixgb_hw *hw,
- u16 data,
- u16 count)
-{
- u32 eecd_reg;
- u32 mask;
-
- /* We need to shift "count" bits out to the EEPROM. So, value in the
- * "data" parameter will be shifted out to the EEPROM one bit at a time.
- * In order to do this, "data" must be broken down into bits.
- */
- mask = 0x01 << (count - 1);
- eecd_reg = IXGB_READ_REG(hw, EECD);
- eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
- do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
- */
- eecd_reg &= ~IXGB_EECD_DI;
-
- if (data & mask)
- eecd_reg |= IXGB_EECD_DI;
-
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
-
- udelay(50);
-
- ixgb_raise_clock(hw, &eecd_reg);
- ixgb_lower_clock(hw, &eecd_reg);
-
- mask = mask >> 1;
-
- } while (mask);
-
- /* We leave the "DI" bit set to "0" when we leave this routine. */
- eecd_reg &= ~IXGB_EECD_DI;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-}
-
-/******************************************************************************
- * Shift data bits in from the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static u16
-ixgb_shift_in_bits(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
- u32 i;
- u16 data;
-
- /* In order to read a register from the EEPROM, we need to shift 16 bits
- * in from the EEPROM. Bits are "shifted in" by raising the clock input to
- * the EEPROM (setting the SK bit), and then reading the value of the "DO"
- * bit. During this "shifting in" process the "DI" bit should always be
- * clear..
- */
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
- data = 0;
-
- for (i = 0; i < 16; i++) {
- data = data << 1;
- ixgb_raise_clock(hw, &eecd_reg);
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- eecd_reg &= ~(IXGB_EECD_DI);
- if (eecd_reg & IXGB_EECD_DO)
- data |= 1;
-
- ixgb_lower_clock(hw, &eecd_reg);
- }
-
- return data;
-}
-
-/******************************************************************************
- * Prepares EEPROM for access
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
- * function should be called before issuing a command to the EEPROM.
- *****************************************************************************/
-static void
-ixgb_setup_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- /* Clear SK and DI */
- eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-
- /* Set CS */
- eecd_reg |= IXGB_EECD_CS;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-}
-
-/******************************************************************************
- * Returns EEPROM to a "standby" state
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_standby_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- /* Deselect EEPROM */
- eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Clock high */
- eecd_reg |= IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Select EEPROM */
- eecd_reg |= IXGB_EECD_CS;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Clock low */
- eecd_reg &= ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Raises then lowers the EEPROM's clock pin
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_clock_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- /* Rising edge of clock */
- eecd_reg |= IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Falling edge of clock */
- eecd_reg &= ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Terminates a command by lowering the EEPROM's chip select pin
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_cleanup_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
-
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-
- ixgb_clock_eeprom(hw);
-}
-
-/******************************************************************************
- * Waits for the EEPROM to finish the current command.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * The command is done when the EEPROM's data out pin goes high.
- *
- * Returns:
- * true: EEPROM data pin is high before timeout.
- * false: Time expired.
- *****************************************************************************/
-static bool
-ixgb_wait_eeprom_command(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
- u32 i;
-
- /* Toggle the CS line. This in effect tells to EEPROM to actually execute
- * the command in question.
- */
- ixgb_standby_eeprom(hw);
-
- /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
- */
- for (i = 0; i < 200; i++) {
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- if (eecd_reg & IXGB_EECD_DO)
- return true;
-
- udelay(50);
- }
- ASSERT(0);
- return false;
-}
-
-/******************************************************************************
- * Verifies that the EEPROM has a valid checksum
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
- * valid.
- *
- * Returns:
- * true: Checksum is valid
- * false: Checksum is not valid.
- *****************************************************************************/
-bool
-ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
-{
- u16 checksum = 0;
- u16 i;
-
- for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
- checksum += ixgb_read_eeprom(hw, i);
-
- if (checksum == (u16) EEPROM_SUM)
- return true;
- else
- return false;
-}
-
-/******************************************************************************
- * Calculates the EEPROM checksum and writes it to the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
- * Writes the difference to word offset 63 of the EEPROM.
- *****************************************************************************/
-void
-ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
-{
- u16 checksum = 0;
- u16 i;
-
- for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
- checksum += ixgb_read_eeprom(hw, i);
-
- checksum = (u16) EEPROM_SUM - checksum;
-
- ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
-}
-
-/******************************************************************************
- * Writes a 16 bit word to a given offset in the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * reg - offset within the EEPROM to be written to
- * data - 16 bit word to be written to the EEPROM
- *
- * If ixgb_update_eeprom_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
- *
- *****************************************************************************/
-void
-ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- /* Prepare the EEPROM for writing */
- ixgb_setup_eeprom(hw);
-
- /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode
- * plus 4-bit dummy). This puts the EEPROM into write/erase mode.
- */
- ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
- ixgb_shift_out_bits(hw, 0, 4);
-
- /* Prepare the EEPROM */
- ixgb_standby_eeprom(hw);
-
- /* Send the Write command (3-bit opcode + 6-bit addr) */
- ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
- ixgb_shift_out_bits(hw, offset, 6);
-
- /* Send the data */
- ixgb_shift_out_bits(hw, data, 16);
-
- ixgb_wait_eeprom_command(hw);
-
- /* Recover from write */
- ixgb_standby_eeprom(hw);
-
- /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
- * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
- * mode.
- */
- ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
- ixgb_shift_out_bits(hw, 0, 4);
-
- /* Done with writing */
- ixgb_cleanup_eeprom(hw);
-
- /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
- ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
-}
-
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of 16 bit word in the EEPROM to read
- *
- * Returns:
- * The 16-bit value read from the eeprom
- *****************************************************************************/
-u16
-ixgb_read_eeprom(struct ixgb_hw *hw,
- u16 offset)
-{
- u16 data;
-
- /* Prepare the EEPROM for reading */
- ixgb_setup_eeprom(hw);
-
- /* Send the READ command (opcode + addr) */
- ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
- /*
- * We have a 64 word EEPROM, there are 6 address bits
- */
- ixgb_shift_out_bits(hw, offset, 6);
-
- /* Read the data */
- data = ixgb_shift_in_bits(hw);
-
- /* End this read operation */
- ixgb_standby_eeprom(hw);
-
- return data;
-}
-
-/******************************************************************************
- * Reads eeprom and stores data in shared structure.
- * Validates eeprom checksum and eeprom signature.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * true: if eeprom read is successful
- * false: otherwise.
- *****************************************************************************/
-bool
-ixgb_get_eeprom_data(struct ixgb_hw *hw)
-{
- u16 i;
- u16 checksum = 0;
- struct ixgb_ee_map_type *ee_map;
-
- ENTER();
-
- ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- pr_debug("Reading eeprom data\n");
- for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
- u16 ee_data;
- ee_data = ixgb_read_eeprom(hw, i);
- checksum += ee_data;
- hw->eeprom[i] = cpu_to_le16(ee_data);
- }
-
- if (checksum != (u16) EEPROM_SUM) {
- pr_debug("Checksum invalid\n");
- /* clear the init_ctrl_reg_1 to signify that the cache is
- * invalidated */
- ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
- return false;
- }
-
- if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
- != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- pr_debug("Signature invalid\n");
- return false;
- }
-
- return true;
-}
-
-/******************************************************************************
- * Local function to check if the eeprom signature is good
- * If the eeprom signature is good, calls ixgb)get_eeprom_data.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * true: eeprom signature was good and the eeprom read was successful
- * false: otherwise.
- ******************************************************************************/
-static bool
-ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
- == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- return true;
- } else {
- return ixgb_get_eeprom_data(hw);
- }
-}
-
-/******************************************************************************
- * return a word from the eeprom
- *
- * hw - Struct containing variables accessed by shared code
- * index - Offset of eeprom word
- *
- * Returns:
- * Word at indexed offset in eeprom, if valid, 0 otherwise.
- ******************************************************************************/
-__le16
-ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
-{
-
- if (index < IXGB_EEPROM_SIZE && ixgb_check_and_get_eeprom_data(hw))
- return hw->eeprom[index];
-
- return 0;
-}
-
-/******************************************************************************
- * return the mac address from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise
- *
- * Returns: None.
- ******************************************************************************/
-void
-ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
- u8 *mac_addr)
-{
- int i;
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- ENTER();
-
- if (ixgb_check_and_get_eeprom_data(hw)) {
- for (i = 0; i < ETH_ALEN; i++) {
- mac_addr[i] = ee_map->mac_addr[i];
- }
- pr_debug("eeprom mac address = %pM\n", mac_addr);
- }
-}
-
-
-/******************************************************************************
- * return the Printed Board Assembly number from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * PBA number if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-u32
-ixgb_get_ee_pba_number(struct ixgb_hw *hw)
-{
- if (ixgb_check_and_get_eeprom_data(hw))
- return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
- | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
-
- return 0;
-}
-
-
-/******************************************************************************
- * return the Device Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * Device Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-u16
-ixgb_get_ee_device_id(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if (ixgb_check_and_get_eeprom_data(hw))
- return le16_to_cpu(ee_map->device_id);
-
- return 0;
-}
-
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
deleted file mode 100644
index 3ee0a09e5d0a..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_EE_H_
-#define _IXGB_EE_H_
-
-#define IXGB_EEPROM_SIZE 64 /* Size in words */
-
-/* EEPROM Commands */
-#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
-#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
-#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
-#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
-
-/* EEPROM MAP (Word Offsets) */
-#define EEPROM_IA_1_2_REG 0x0000
-#define EEPROM_IA_3_4_REG 0x0001
-#define EEPROM_IA_5_6_REG 0x0002
-#define EEPROM_COMPATIBILITY_REG 0x0003
-#define EEPROM_PBA_1_2_REG 0x0008
-#define EEPROM_PBA_3_4_REG 0x0009
-#define EEPROM_INIT_CONTROL1_REG 0x000A
-#define EEPROM_SUBSYS_ID_REG 0x000B
-#define EEPROM_SUBVEND_ID_REG 0x000C
-#define EEPROM_DEVICE_ID_REG 0x000D
-#define EEPROM_VENDOR_ID_REG 0x000E
-#define EEPROM_INIT_CONTROL2_REG 0x000F
-#define EEPROM_SWDPINS_REG 0x0020
-#define EEPROM_CIRCUIT_CTRL_REG 0x0021
-#define EEPROM_D0_D3_POWER_REG 0x0022
-#define EEPROM_FLASH_VERSION 0x0032
-#define EEPROM_CHECKSUM_REG 0x003F
-
-/* Mask bits for fields in Word 0x0a of the EEPROM */
-
-#define EEPROM_ICW1_SIGNATURE_MASK 0xC000
-#define EEPROM_ICW1_SIGNATURE_VALID 0x4000
-#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000
-
-/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
-#define EEPROM_SUM 0xBABA
-
-/* EEPROM Map Sizes (Byte Counts) */
-#define PBA_SIZE 4
-
-/* EEPROM Map defines (WORD OFFSETS)*/
-
-/* EEPROM structure */
-struct ixgb_ee_map_type {
- u8 mac_addr[ETH_ALEN];
- __le16 compatibility;
- __le16 reserved1[4];
- __le32 pba_number;
- __le16 init_ctrl_reg_1;
- __le16 subsystem_id;
- __le16 subvendor_id;
- __le16 device_id;
- __le16 vendor_id;
- __le16 init_ctrl_reg_2;
- __le16 oem_reserved[16];
- __le16 swdpins_reg;
- __le16 circuit_ctrl_reg;
- u8 d3_power;
- u8 d0_power;
- __le16 reserved2[28];
- __le16 checksum;
-};
-
-/* EEPROM Functions */
-u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
-
-bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
-
-void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
-
-void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
-
-#endif /* IXGB_EE_H */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
deleted file mode 100644
index efa980514944..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ /dev/null
@@ -1,642 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-/* ethtool support for ixgb */
-
-#include "ixgb.h"
-
-#include <linux/uaccess.h>
-
-#define IXGB_ALL_RAR_ENTRIES 16
-
-enum {NETDEV_STATS, IXGB_STATS};
-
-struct ixgb_stats {
- char stat_string[ETH_GSTRING_LEN];
- int type;
- int sizeof_stat;
- int stat_offset;
-};
-
-#define IXGB_STAT(m) IXGB_STATS, \
- sizeof_field(struct ixgb_adapter, m), \
- offsetof(struct ixgb_adapter, m)
-#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \
- sizeof_field(struct net_device, m), \
- offsetof(struct net_device, m)
-
-static struct ixgb_stats ixgb_gstrings_stats[] = {
- {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
- {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
- {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
- {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
- {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
- {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
- {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
- {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
- {"multicast", IXGB_NETDEV_STAT(stats.multicast)},
- {"collisions", IXGB_NETDEV_STAT(stats.collisions)},
-
-/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */
- {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
- {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
- {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
- {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
- {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
- {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
- {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
- {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
- {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
- {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
- {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
- {"tx_deferred_ok", IXGB_STAT(stats.dc)},
- {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
- {"tx_restart_queue", IXGB_STAT(restart_queue) },
- {"rx_long_length_errors", IXGB_STAT(stats.roc)},
- {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
- {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
- {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
- {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
- {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
- {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
- {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)},
- {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)},
- {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)},
- {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)},
- {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
-};
-
-#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
-
-static int
-ixgb_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(adapter->netdev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-void ixgb_set_speed_duplex(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
-}
-
-static int
-ixgb_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 speed = cmd->base.speed;
-
- if (cmd->base.autoneg == AUTONEG_ENABLE ||
- (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
- return -EINVAL;
-
- if (netif_running(adapter->netdev)) {
- ixgb_down(adapter, true);
- ixgb_reset(adapter);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
-
- return 0;
-}
-
-static void
-ixgb_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
-
- pause->autoneg = AUTONEG_DISABLE;
-
- if (hw->fc.type == ixgb_fc_rx_pause)
- pause->rx_pause = 1;
- else if (hw->fc.type == ixgb_fc_tx_pause)
- pause->tx_pause = 1;
- else if (hw->fc.type == ixgb_fc_full) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int
-ixgb_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
-
- if (pause->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- if (pause->rx_pause && pause->tx_pause)
- hw->fc.type = ixgb_fc_full;
- else if (pause->rx_pause && !pause->tx_pause)
- hw->fc.type = ixgb_fc_rx_pause;
- else if (!pause->rx_pause && pause->tx_pause)
- hw->fc.type = ixgb_fc_tx_pause;
- else if (!pause->rx_pause && !pause->tx_pause)
- hw->fc.type = ixgb_fc_none;
-
- if (netif_running(adapter->netdev)) {
- ixgb_down(adapter, true);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
-
- return 0;
-}
-
-static u32
-ixgb_get_msglevel(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
-}
-
-static void
-ixgb_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = data;
-}
-#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
-
-static int
-ixgb_get_regs_len(struct net_device *netdev)
-{
-#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
- return IXGB_REG_DUMP_LEN;
-}
-
-static void
-ixgb_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- u32 *reg = p;
- u32 *reg_start = reg;
- u8 i;
-
- /* the 1 (one) below indicates an attempt at versioning, if the
- * interface in ethtool or the driver changes, this 1 should be
- * incremented */
- regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
-
- /* General Registers */
- *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
- *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
- *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
- *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
- *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
-
- /* Interrupt */
- *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
- *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
- *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
- *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
-
- /* Receive */
- *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
- *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
- *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
- *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
- *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
- *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
- *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
- *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
- *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
- *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
- *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
- *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
-
- /* there are 16 RAR entries in hardware, we only use 3 */
- for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
- *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
- *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
- }
-
- /* Transmit */
- *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
- *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
- *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
- *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
- *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
- *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
- *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
- *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
- *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
- *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
-
- /* Physical */
- *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
- *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
- *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
- *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
- *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
- *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
- *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
- *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
- *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
- *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
- *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
- *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
- *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
-
- /* Statistics */
- *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
- *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
- *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
- *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
- *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
- *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
- *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
- *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
- *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
- *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
- *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
- *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
- *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
- *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
- *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
- *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
- *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
- *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
- *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
- *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
- *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
- *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
- *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
- *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
- *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
- *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
- *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
- *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
- *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
- *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
- *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
- *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
- *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
- *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
- *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
- *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
- *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
- *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
- *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
- *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
- *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
- *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
- *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
- *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
- *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
- *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
- *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
- *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
- *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
- *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
- *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
- *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
- *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
- *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
- *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
- *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
- *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
- *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
- *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
- *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
-
- regs->len = (reg - reg_start) * sizeof(u32);
-}
-
-static int
-ixgb_get_eeprom_len(struct net_device *netdev)
-{
- /* return size in bytes */
- return IXGB_EEPROM_SIZE << 1;
-}
-
-static int
-ixgb_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- __le16 *eeprom_buff;
- int i, max_len, first_word, last_word;
- int ret_val = 0;
-
- if (eeprom->len == 0) {
- ret_val = -EINVAL;
- goto geeprom_error;
- }
-
- eeprom->magic = hw->vendor_id | (hw->device_id << 16);
-
- max_len = ixgb_get_eeprom_len(netdev);
-
- if (eeprom->offset > eeprom->offset + eeprom->len) {
- ret_val = -EINVAL;
- goto geeprom_error;
- }
-
- if ((eeprom->offset + eeprom->len) > max_len)
- eeprom->len = (max_len - eeprom->offset);
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-
- eeprom_buff = kmalloc_array(last_word - first_word + 1,
- sizeof(__le16),
- GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- /* note the eeprom was good because the driver loaded */
- for (i = 0; i <= (last_word - first_word); i++)
- eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
-
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
-
-geeprom_error:
- return ret_val;
-}
-
-static int
-ixgb_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- void *ptr;
- int max_len, first_word, last_word;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
-
- if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
- return -EFAULT;
-
- max_len = ixgb_get_eeprom_len(netdev);
-
- if (eeprom->offset > eeprom->offset + eeprom->len)
- return -EINVAL;
-
- if ((eeprom->offset + eeprom->len) > max_len)
- eeprom->len = (max_len - eeprom->offset);
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- ptr = (void *)eeprom_buff;
-
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
- ptr++;
- }
- if ((eeprom->offset + eeprom->len) & 1) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- eeprom_buff[last_word - first_word]
- = ixgb_read_eeprom(hw, last_word);
- }
-
- memcpy(ptr, bytes, eeprom->len);
- for (i = 0; i <= (last_word - first_word); i++)
- ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
-
- /* Update the checksum over the first part of the EEPROM if needed */
- if (first_word <= EEPROM_CHECKSUM_REG)
- ixgb_update_eeprom_checksum(hw);
-
- kfree(eeprom_buff);
- return 0;
-}
-
-static void
-ixgb_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- strscpy(drvinfo->driver, ixgb_driver_name,
- sizeof(drvinfo->driver));
- strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-ixgb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring,
- struct kernel_ethtool_ringparam *kernel_ring,
- struct netlink_ext_ack *extack)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
-
- ring->rx_max_pending = MAX_RXD;
- ring->tx_max_pending = MAX_TXD;
- ring->rx_pending = rxdr->count;
- ring->tx_pending = txdr->count;
-}
-
-static int
-ixgb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring,
- struct kernel_ethtool_ringparam *kernel_ring,
- struct netlink_ext_ack *extack)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
- struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
- int err;
-
- tx_old = adapter->tx_ring;
- rx_old = adapter->rx_ring;
-
-	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
- return -EINVAL;
-
- if (netif_running(adapter->netdev))
- ixgb_down(adapter, true);
-
-	rxdr->count = max(ring->rx_pending, (u32)MIN_RXD);
-	rxdr->count = min(rxdr->count, (u32)MAX_RXD);
-	rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
-
-	txdr->count = max(ring->tx_pending, (u32)MIN_TXD);
-	txdr->count = min(txdr->count, (u32)MAX_TXD);
-	txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
-
- if (netif_running(adapter->netdev)) {
- /* Try to get new resources before deleting old */
-		err = ixgb_setup_rx_resources(adapter);
-		if (err)
-			goto err_setup_rx;
-		err = ixgb_setup_tx_resources(adapter);
-		if (err)
-			goto err_setup_tx;
-
- /* save the new, restore the old in order to free it,
- * then restore the new back again */
-
- rx_new = adapter->rx_ring;
- tx_new = adapter->tx_ring;
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- ixgb_free_rx_resources(adapter);
- ixgb_free_tx_resources(adapter);
- adapter->rx_ring = rx_new;
- adapter->tx_ring = tx_new;
-		err = ixgb_up(adapter);
-		if (err)
-			return err;
- ixgb_set_speed_duplex(netdev);
- }
-
- return 0;
-err_setup_tx:
- ixgb_free_rx_resources(adapter);
-err_setup_rx:
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- ixgb_up(adapter);
- return err;
-}
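-
-/* Illustrative sketch, not part of the original driver: the descriptor
- * counts above are clamped to the supported range and then rounded up
- * to the hardware's required multiple of 8 descriptors.
- * clamp_ring_count() is a hypothetical helper used only for this
- * example.
- */
-static inline u32 clamp_ring_count(u32 requested, u32 lo, u32 hi)
-{
-	u32 count = min(max(requested, lo), hi);
-
-	/* e.g. requested = 1000 stays 1000, while 1001 rounds up to 1008 */
-	return ALIGN(count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
-}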
-
-static int
-ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
-		return 2;	/* blink at two on/off cycles per second */
-
- case ETHTOOL_ID_ON:
- ixgb_led_on(&adapter->hw);
- break;
-
- case ETHTOOL_ID_OFF:
- case ETHTOOL_ID_INACTIVE:
- ixgb_led_off(&adapter->hw);
- }
-
- return 0;
-}
-
-static int
-ixgb_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return IXGB_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void
-ixgb_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- int i;
- char *p = NULL;
-
- ixgb_update_stats(adapter);
- for (i = 0; i < IXGB_STATS_LEN; i++) {
- switch (ixgb_gstrings_stats[i].type) {
- case NETDEV_STATS:
- p = (char *) netdev +
- ixgb_gstrings_stats[i].stat_offset;
- break;
- case IXGB_STATS:
- p = (char *) adapter +
- ixgb_gstrings_stats[i].stat_offset;
- break;
- }
-
- data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
-}
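-
-/* Illustrative sketch, not part of the original driver: each
- * ixgb_gstrings_stats entry records which base structure a counter
- * lives in, its width, and its byte offset, so the loop above can
- * fetch any statistic with a single pointer computation.
- * ixgb_read_stat() is a hypothetical helper mirroring that logic.
- */
-static u64 ixgb_read_stat(const void *base, size_t stat_offset,
-			  int sizeof_stat)
-{
-	const char *p = (const char *)base + stat_offset;
-
-	return (sizeof_stat == sizeof(u64)) ? *(const u64 *)p
-					    : *(const u32 *)p;
-}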
-
-static void
-ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- int i;
-
-	switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < IXGB_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- ixgb_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- }
- break;
- }
-}
-
-static const struct ethtool_ops ixgb_ethtool_ops = {
- .get_drvinfo = ixgb_get_drvinfo,
- .get_regs_len = ixgb_get_regs_len,
- .get_regs = ixgb_get_regs,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = ixgb_get_eeprom_len,
- .get_eeprom = ixgb_get_eeprom,
- .set_eeprom = ixgb_set_eeprom,
- .get_ringparam = ixgb_get_ringparam,
- .set_ringparam = ixgb_set_ringparam,
- .get_pauseparam = ixgb_get_pauseparam,
- .set_pauseparam = ixgb_set_pauseparam,
- .get_msglevel = ixgb_get_msglevel,
- .set_msglevel = ixgb_set_msglevel,
- .get_strings = ixgb_get_strings,
- .set_phys_id = ixgb_set_phys_id,
- .get_sset_count = ixgb_get_sset_count,
- .get_ethtool_stats = ixgb_get_ethtool_stats,
- .get_link_ksettings = ixgb_get_link_ksettings,
- .set_link_ksettings = ixgb_set_link_ksettings,
-};
-
-void ixgb_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &ixgb_ethtool_ops;
-}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
deleted file mode 100644
index 98bd3267b99b..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ /dev/null
@@ -1,1229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-/* ixgb_hw.c
- * Shared functions for accessing and configuring the adapter
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci_ids.h>
-#include "ixgb_hw.h"
-#include "ixgb_ids.h"
-
-#include <linux/etherdevice.h>
-
-/* Local function prototypes */
-
-static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 *mc_addr);
-
-static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
-
-static void ixgb_get_bus_info(struct ixgb_hw *hw);
-
-static bool ixgb_link_reset(struct ixgb_hw *hw);
-
-static void ixgb_optics_reset(struct ixgb_hw *hw);
-
-static void ixgb_optics_reset_bcm(struct ixgb_hw *hw);
-
-static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
-
-static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
-
-static void ixgb_clear_vfta(struct ixgb_hw *hw);
-
-static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
-
-static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
- u32 reg_address,
- u32 phy_address,
- u32 device_type);
-
-static bool ixgb_setup_fc(struct ixgb_hw *hw);
-
-static bool mac_addr_valid(u8 *mac_addr);
-
-static u32 ixgb_mac_reset(struct ixgb_hw *hw)
-{
- u32 ctrl_reg;
-
- ctrl_reg = IXGB_CTRL0_RST |
- IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
- IXGB_CTRL0_SDP2_DIR |
- IXGB_CTRL0_SDP1_DIR |
- IXGB_CTRL0_SDP0_DIR |
- IXGB_CTRL0_SDP3 | /* Initial value 1101 */
- IXGB_CTRL0_SDP2 |
- IXGB_CTRL0_SDP0;
-
-#ifdef HP_ZX1
- /* Workaround for 82597EX reset errata */
- IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
-#else
- IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
-#endif
-
- /* Delay a few ms just to allow the reset to complete */
- msleep(IXGB_DELAY_AFTER_RESET);
- ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-#ifdef DBG
- /* Make sure the self-clearing global reset bit did self clear */
- ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
-#endif
-
- if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
-		/* Enable interrupt from XFP and SerDes */
-		ctrl_reg = IXGB_CTRL1_GPI0_EN |
-			   IXGB_CTRL1_SDP6_DIR |
-			   IXGB_CTRL1_SDP7_DIR |
-			   IXGB_CTRL1_SDP6 |
-			   IXGB_CTRL1_SDP7;
- IXGB_WRITE_REG(hw, CTRL1, ctrl_reg);
- ixgb_optics_reset_bcm(hw);
- }
-
- if (hw->phy_type == ixgb_phy_type_txn17401)
- ixgb_optics_reset(hw);
-
- return ctrl_reg;
-}
-
-/******************************************************************************
- * Reset the transmit and receive units; mask and clear all interrupts.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-bool
-ixgb_adapter_stop(struct ixgb_hw *hw)
-{
- u32 ctrl_reg;
-
- ENTER();
-
- /* If we are stopped or resetting exit gracefully and wait to be
- * started again before accessing the hardware.
- */
- if (hw->adapter_stopped) {
- pr_debug("Exiting because the adapter is already stopped!!!\n");
- return false;
- }
-
- /* Set the Adapter Stopped flag so other driver functions stop
- * touching the Hardware.
- */
- hw->adapter_stopped = true;
-
- /* Clear interrupt mask to stop board from generating interrupts */
- pr_debug("Masking off all interrupts\n");
- IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
-
- /* Disable the Transmit and Receive units. Then delay to allow
- * any pending transactions to complete before we hit the MAC with
- * the global reset.
- */
- IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
- IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
- IXGB_WRITE_FLUSH(hw);
- msleep(IXGB_DELAY_BEFORE_RESET);
-
- /* Issue a global reset to the MAC. This will reset the chip's
-	 * transmit, receive, DMA, and link units. It will not affect
- * the current PCI configuration. The global reset bit is self-
- * clearing, and should clear within a microsecond.
- */
- pr_debug("Issuing a global reset to MAC\n");
-
- ctrl_reg = ixgb_mac_reset(hw);
-
- /* Clear interrupt mask to stop board from generating interrupts */
- pr_debug("Masking off all interrupts\n");
- IXGB_WRITE_REG(hw, IMC, 0xffffffff);
-
- /* Clear any pending interrupt events. */
- IXGB_READ_REG(hw, ICR);
-
- return ctrl_reg & IXGB_CTRL0_RST;
-}
-
-
-/******************************************************************************
- * Identifies the vendor of the optics module on the adapter. The SR adapters
- * support two different types of XPAK optics, so it is necessary to determine
- * which optics are present before applying any optics-specific workarounds.
- *
- * hw - Struct containing variables accessed by shared code.
- *
- * Returns: the vendor of the XPAK optics module.
- *****************************************************************************/
-static ixgb_xpak_vendor
-ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
-{
- u32 i;
- u16 vendor_name[5];
- ixgb_xpak_vendor xpak_vendor;
-
- ENTER();
-
- /* Read the first few bytes of the vendor string from the XPAK NVR
- * registers. These are standard XENPAK/XPAK registers, so all XPAK
- * devices should implement them. */
- for (i = 0; i < 5; i++) {
- vendor_name[i] = ixgb_read_phy_reg(hw,
- MDIO_PMA_PMD_XPAK_VENDOR_NAME
- + i, IXGB_PHY_ADDRESS,
- MDIO_MMD_PMAPMD);
- }
-
- /* Determine the actual vendor */
- if (vendor_name[0] == 'I' &&
- vendor_name[1] == 'N' &&
- vendor_name[2] == 'T' &&
- vendor_name[3] == 'E' && vendor_name[4] == 'L') {
- xpak_vendor = ixgb_xpak_vendor_intel;
- } else {
- xpak_vendor = ixgb_xpak_vendor_infineon;
- }
-
- return xpak_vendor;
-}
-
-/******************************************************************************
- * Determine the physical layer module on the adapter.
- *
- * hw - Struct containing variables accessed by shared code. The device_id
- * field must be (correctly) populated before calling this routine.
- *
- * Returns: the phy type of the adapter.
- *****************************************************************************/
-static ixgb_phy_type
-ixgb_identify_phy(struct ixgb_hw *hw)
-{
- ixgb_phy_type phy_type;
- ixgb_xpak_vendor xpak_vendor;
-
- ENTER();
-
- /* Infer the transceiver/phy type from the device id */
- switch (hw->device_id) {
- case IXGB_DEVICE_ID_82597EX:
- pr_debug("Identified TXN17401 optics\n");
- phy_type = ixgb_phy_type_txn17401;
- break;
-
- case IXGB_DEVICE_ID_82597EX_SR:
- /* The SR adapters carry two different types of XPAK optics
- * modules; read the vendor identifier to determine the exact
- * type of optics. */
- xpak_vendor = ixgb_identify_xpak_vendor(hw);
- if (xpak_vendor == ixgb_xpak_vendor_intel) {
- pr_debug("Identified TXN17201 optics\n");
- phy_type = ixgb_phy_type_txn17201;
- } else {
- pr_debug("Identified G6005 optics\n");
- phy_type = ixgb_phy_type_g6005;
- }
- break;
- case IXGB_DEVICE_ID_82597EX_LR:
- pr_debug("Identified G6104 optics\n");
- phy_type = ixgb_phy_type_g6104;
- break;
- case IXGB_DEVICE_ID_82597EX_CX4:
- pr_debug("Identified CX4\n");
- xpak_vendor = ixgb_identify_xpak_vendor(hw);
- if (xpak_vendor == ixgb_xpak_vendor_intel) {
- pr_debug("Identified TXN17201 optics\n");
- phy_type = ixgb_phy_type_txn17201;
- } else {
- pr_debug("Identified G6005 optics\n");
- phy_type = ixgb_phy_type_g6005;
- }
- break;
- default:
- pr_debug("Unknown physical layer module\n");
- phy_type = ixgb_phy_type_unknown;
- break;
- }
-
- /* update phy type for sun specific board */
- if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
- phy_type = ixgb_phy_type_bcm;
-
- return phy_type;
-}
-
-/******************************************************************************
- * Performs basic configuration of the adapter.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Resets the controller.
- * Reads and validates the EEPROM.
- * Initializes the receive address registers.
- * Initializes the multicast table.
- * Clears all on-chip counters.
- * Calls routine to setup flow control settings.
- * Leaves the transmit and receive units disabled and uninitialized.
- *
- * Returns:
- * true if successful,
- * false if unrecoverable problems were encountered.
- *****************************************************************************/
-bool
-ixgb_init_hw(struct ixgb_hw *hw)
-{
- u32 i;
- bool status;
-
- ENTER();
-
- /* Issue a global reset to the MAC. This will reset the chip's
-	 * transmit, receive, DMA, and link units. It will not affect
- * the current PCI configuration. The global reset bit is self-
- * clearing, and should clear within a microsecond.
- */
- pr_debug("Issuing a global reset to MAC\n");
-
- ixgb_mac_reset(hw);
-
- pr_debug("Issuing an EE reset to MAC\n");
-#ifdef HP_ZX1
- /* Workaround for 82597EX reset errata */
- IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
-#else
- IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
-#endif
-
- /* Delay a few ms just to allow the reset to complete */
- msleep(IXGB_DELAY_AFTER_EE_RESET);
-
- if (!ixgb_get_eeprom_data(hw))
- return false;
-
- /* Use the device id to determine the type of phy/transceiver. */
- hw->device_id = ixgb_get_ee_device_id(hw);
- hw->phy_type = ixgb_identify_phy(hw);
-
- /* Setup the receive addresses.
- * Receive Address Registers (RARs 0 - 15).
- */
- ixgb_init_rx_addrs(hw);
-
- /*
- * Check that a valid MAC address has been set.
- * If it is not valid, we fail hardware init.
- */
- if (!mac_addr_valid(hw->curr_mac_addr)) {
- pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
-		return false;
- }
-
- /* tell the routines in this file they can access hardware again */
- hw->adapter_stopped = false;
-
- /* Fill in the bus_info structure */
- ixgb_get_bus_info(hw);
-
- /* Zero out the Multicast HASH table */
- pr_debug("Zeroing the MTA\n");
- for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
- IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
-
- /* Zero out the VLAN Filter Table Array */
- ixgb_clear_vfta(hw);
-
- /* Zero all of the hardware counters */
- ixgb_clear_hw_cntrs(hw);
-
- /* Call a subroutine to setup flow control. */
- status = ixgb_setup_fc(hw);
-
- /* 82597EX errata: Call check-for-link in case lane deskew is locked */
- ixgb_check_for_link(hw);
-
- return status;
-}
-
-/******************************************************************************
- * Initializes receive address filters.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Places the MAC address in receive address register 0 and clears the rest
- * of the receive address registers. Clears the multicast table. Assumes
- * the receiver is in reset when the routine is called.
- *****************************************************************************/
-static void
-ixgb_init_rx_addrs(struct ixgb_hw *hw)
-{
- u32 i;
-
- ENTER();
-
- /*
- * If the current mac address is valid, assume it is a software override
- * to the permanent address.
- * Otherwise, use the permanent address from the eeprom.
- */
- if (!mac_addr_valid(hw->curr_mac_addr)) {
-
- /* Get the MAC address from the eeprom for later reference */
- ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
-
- pr_debug("Keeping Permanent MAC Addr = %pM\n",
- hw->curr_mac_addr);
- } else {
-
- /* Setup the receive address. */
- pr_debug("Overriding MAC Address in RAR[0]\n");
- pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
-
- ixgb_rar_set(hw, hw->curr_mac_addr, 0);
- }
-
- /* Zero out the other 15 receive addresses. */
- pr_debug("Clearing RAR[1-15]\n");
- for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
- /* Write high reg first to disable the AV bit first */
- IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- }
-}
-
-/******************************************************************************
- * Updates the MAC's list of multicast addresses.
- *
- * hw - Struct containing variables accessed by shared code
- * mc_addr_list - the list of new multicast addresses
- * mc_addr_count - number of addresses
- * pad - number of bytes between addresses in the list
- *
- * The given list replaces any existing list. Clears the last 15 receive
- * address registers and the multicast table. Uses receive address registers
- * for the first 15 multicast addresses, and hashes the rest into the
- * multicast table.
- *****************************************************************************/
-void
-ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 pad)
-{
- u32 hash_value;
- u32 i;
- u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
- u8 *mca;
-
- ENTER();
-
- /* Set the new number of MC addresses that we are being requested to use. */
- hw->num_mc_addrs = mc_addr_count;
-
- /* Clear RAR[1-15] */
- pr_debug("Clearing RAR[1-15]\n");
- for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
- IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- }
-
- /* Clear the MTA */
- pr_debug("Clearing MTA\n");
- for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
- IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
-
- /* Add the new addresses */
- mca = mc_addr_list;
- for (i = 0; i < mc_addr_count; i++) {
- pr_debug("Adding the multicast addresses:\n");
- pr_debug("MC Addr #%d = %pM\n", i, mca);
-
-		/* Place this multicast address in the RAR if there is room,
-		 * else put it in the MTA.
-		 */
- if (rar_used_count < IXGB_RAR_ENTRIES) {
- ixgb_rar_set(hw, mca, rar_used_count);
-			pr_debug("Added a multicast address to RAR[%d]\n",
-				 rar_used_count);
- rar_used_count++;
- } else {
- hash_value = ixgb_hash_mc_addr(hw, mca);
-
- pr_debug("Hash value = 0x%03X\n", hash_value);
-
- ixgb_mta_set(hw, hash_value);
- }
-
- mca += ETH_ALEN + pad;
- }
-
- pr_debug("MC Update Complete\n");
-}
-
-/******************************************************************************
- * Hashes an address to determine its location in the multicast table
- *
- * hw - Struct containing variables accessed by shared code
- * mc_addr - the multicast address to hash
- *
- * Returns:
- * The hash value
- *****************************************************************************/
-static u32
-ixgb_hash_mc_addr(struct ixgb_hw *hw,
- u8 *mc_addr)
-{
- u32 hash_value = 0;
-
- ENTER();
-
- /* The portion of the address that is used for the hash table is
- * determined by the mc_filter_type setting.
- */
- switch (hw->mc_filter_type) {
- /* [0] [1] [2] [3] [4] [5]
- * 01 AA 00 12 34 56
- * LSB MSB - According to H/W docs */
- case 0:
- /* [47:36] i.e. 0x563 for above example address */
- hash_value =
- ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
- break;
- case 1: /* [46:35] i.e. 0xAC6 for above example address */
- hash_value =
- ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
- break;
- case 2: /* [45:34] i.e. 0x5D8 for above example address */
- hash_value =
- ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
- break;
- case 3: /* [43:32] i.e. 0x634 for above example address */
- hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
- break;
- default:
- /* Invalid mc_filter_type, what should we do? */
- pr_debug("MC filter type param set incorrectly\n");
- ASSERT(0);
- break;
- }
-
- hash_value &= 0xFFF;
- return hash_value;
-}
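-
-/* Illustrative sketch, not part of the original driver: a worked
- * instance of the filter-type-0 hash using the documented example
- * address 01:AA:00:12:34:56. check_hash_example() is a hypothetical
- * self-check, not a driver entry point.
- */
-static void __maybe_unused check_hash_example(void)
-{
-	const u8 mc_addr[ETH_ALEN] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
-	u32 hash;
-
-	/* type 0 uses address bits [47:36]: the high nibble of byte 4
-	 * plus all of byte 5, (0x34 >> 4) | (0x56 << 4) = 0x563
-	 */
-	hash = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)) & 0xFFF;
-	WARN_ON(hash != 0x563);
-}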
-
-/******************************************************************************
- * Sets the bit in the multicast table corresponding to the hash value.
- *
- * hw - Struct containing variables accessed by shared code
- * hash_value - Multicast address hash value
- *****************************************************************************/
-static void
-ixgb_mta_set(struct ixgb_hw *hw,
- u32 hash_value)
-{
- u32 hash_bit, hash_reg;
- u32 mta_reg;
-
- /* The MTA is a register array of 128 32-bit registers.
- * It is treated like an array of 4096 bits. We want to set
- * bit BitArray[hash_value]. So we figure out what register
- * the bit is in, read it, OR in the new bit, then write
- * back the new value. The register is determined by the
- * upper 7 bits of the hash value and the bit within that
-	 * register is determined by the lower 5 bits of the value.
- */
- hash_reg = (hash_value >> 5) & 0x7F;
- hash_bit = hash_value & 0x1F;
-
- mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
-
- mta_reg |= (1 << hash_bit);
-
- IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
-}
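-
-/* Illustrative note, not part of the original driver: continuing the
- * 0x563 example above, the 12-bit hash selects bit 3 of MTA register 43:
- *
- *	hash_reg = (0x563 >> 5) & 0x7F = 0x2B (43)
- *	hash_bit =  0x563 & 0x1F       = 0x03
- *
- * so ixgb_mta_set() ORs (1 << 3) into the 44th 32-bit MTA register.
- */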
-
-/******************************************************************************
- * Puts an ethernet address into a receive address register.
- *
- * hw - Struct containing variables accessed by shared code
- * addr - Address to put into receive address register
- * index - Receive address register to write
- *****************************************************************************/
-void
-ixgb_rar_set(struct ixgb_hw *hw,
- const u8 *addr,
- u32 index)
-{
- u32 rar_low, rar_high;
-
- ENTER();
-
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((u32) addr[0] |
- ((u32)addr[1] << 8) |
- ((u32)addr[2] << 16) |
- ((u32)addr[3] << 24));
-
- rar_high = ((u32) addr[4] |
- ((u32)addr[5] << 8) |
- IXGB_RAH_AV);
-
- IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
- IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
-}
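-
-/* Illustrative note, not part of the original driver: for an example
- * address 00:A0:C9:12:34:56 the packing above produces
- *
- *	rar_low  = 0x12C9A000	(bytes 0-3, byte 0 in bits 7:0)
- *	rar_high = 0x80005634	(bytes 4-5, plus IXGB_RAH_AV set)
- *
- * i.e. the wire-order (big endian) address is stored little endian,
- * with the Address Valid bit marking the entry as in use.
- */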
-
-/******************************************************************************
- * Writes a value to the specified offset in the VLAN filter table.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - Offset in VLAN filter table to write
- * value - Value to write into VLAN filter table
- *****************************************************************************/
-void
-ixgb_write_vfta(struct ixgb_hw *hw,
- u32 offset,
- u32 value)
-{
- IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
-}
-
-/******************************************************************************
- * Clears the VLAN filter table
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_clear_vfta(struct ixgb_hw *hw)
-{
- u32 offset;
-
- for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
- IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
-}
-
-/******************************************************************************
- * Configures the flow control settings based on SW configuration.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-
-static bool
-ixgb_setup_fc(struct ixgb_hw *hw)
-{
- u32 ctrl_reg;
- u32 pap_reg = 0; /* by default, assume no pause time */
- bool status = true;
-
- ENTER();
-
- /* Get the current control reg 0 settings */
- ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-
- /* Clear the Receive Pause Enable and Transmit Pause Enable bits */
- ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
-
- /* The possible values of the "flow_control" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames
- * but we do not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.type) {
- case ixgb_fc_none: /* 0 */
- /* Set CMDC bit to disable Rx Flow control */
- ctrl_reg |= (IXGB_CTRL0_CMDC);
- break;
- case ixgb_fc_rx_pause: /* 1 */
- /* RX Flow control is enabled, and TX Flow control is
- * disabled.
- */
- ctrl_reg |= (IXGB_CTRL0_RPE);
- break;
- case ixgb_fc_tx_pause: /* 2 */
- /* TX Flow control is enabled, and RX Flow control is
-		 * disabled by a software override.
- */
- ctrl_reg |= (IXGB_CTRL0_TPE);
- pap_reg = hw->fc.pause_time;
- break;
- case ixgb_fc_full: /* 3 */
- /* Flow control (both RX and TX) is enabled by a software
-		 * override.
- */
- ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
- pap_reg = hw->fc.pause_time;
- break;
- default:
- /* We should never get here. The value should be 0-3. */
- pr_debug("Flow control param set incorrectly\n");
- ASSERT(0);
- break;
- }
-
- /* Write the new settings */
- IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
-
- if (pap_reg != 0)
- IXGB_WRITE_REG(hw, PAP, pap_reg);
-
- /* Set the flow control receive threshold registers. Normally,
- * these registers will be set to a default threshold that may be
- * adjusted later by the driver's runtime code. However, if the
-	 * ability to transmit pause frames is not enabled, then these
- * registers will be set to 0.
- */
- if (!(hw->fc.type & ixgb_fc_tx_pause)) {
- IXGB_WRITE_REG(hw, FCRTL, 0);
- IXGB_WRITE_REG(hw, FCRTH, 0);
- } else {
- /* We need to set up the Receive Threshold high and low water
- * marks as well as (optionally) enabling the transmission of XON
- * frames. */
- if (hw->fc.send_xon) {
- IXGB_WRITE_REG(hw, FCRTL,
- (hw->fc.low_water | IXGB_FCRTL_XONE));
- } else {
- IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
- }
- IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
- }
- return status;
-}
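-
-/* Illustrative sketch, not part of the original driver: the flow
- * control enum doubles as a pair of bit flags (rx = bit 0, tx = bit 1,
- * full = both), which is why the threshold-register test above uses
- * "hw->fc.type & ixgb_fc_tx_pause" rather than an equality compare: it
- * matches both ixgb_fc_tx_pause (2) and ixgb_fc_full (3).
- * ixgb_fc_tx_enabled() is a hypothetical helper used only here.
- */
-static inline bool ixgb_fc_tx_enabled(u32 fc_type)
-{
-	return (fc_type & ixgb_fc_tx_pause) != 0;	/* true for 2 and 3 */
-}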
-
-/******************************************************************************
- * Reads a word from a device over the Management Data Interface (MDI) bus.
- * This interface is used to manage Physical layer devices.
- *
- * hw - Struct containing variables accessed by hw code
- * reg_address - Offset of device register being read.
- * phy_address - Address of device on MDI.
- * phy_address - Address of device on MDI.
- * device_type - Also known as the Device ID or DID.
- * Returns: Data word (16 bits) from MDI device.
- *
- * The 82597EX has support for several MDI access methods. This routine
- * uses the new protocol MDI Single Command and Address Operation.
- * This requires that first an address cycle command is sent, followed by a
- * read command.
- *****************************************************************************/
-static u16
-ixgb_read_phy_reg(struct ixgb_hw *hw,
- u32 reg_address,
- u32 phy_address,
- u32 device_type)
-{
- u32 i;
- u32 data;
- u32 command = 0;
-
- ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
- ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
- ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
-
- /* Setup and write the address cycle command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
- ** Check every 10 usec to see if the address cycle completed
- ** The COMMAND bit will clear when the operation is complete.
- ** This may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
-	for (i = 0; i < 10; i++) {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Address cycle complete, setup and write the read command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
- ** Check every 10 usec to see if the read command completed
- ** The COMMAND bit will clear when the operation is complete.
- ** The read may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
-	for (i = 0; i < 10; i++) {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Operation is complete, get the data from the MDIO Read/Write Data
- * register and return.
- */
- data = IXGB_READ_REG(hw, MSRWD);
- data >>= IXGB_MSRWD_READ_DATA_SHIFT;
-	return (u16)data;
-}
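-
-/* Illustrative note, not part of the original driver: composition of
- * the MSCA address-cycle command written above, for a hypothetical
- * register 0x0005 on PHY address 1, device type MDIO_MMD_PMAPMD (1):
- *
- *	0x0005 << IXGB_MSCA_NP_ADDR_SHIFT  (0)  = 0x00000005
- *	     1 << IXGB_MSCA_DEV_TYPE_SHIFT (16) = 0x00010000
- *	     1 << IXGB_MSCA_PHY_ADDR_SHIFT (21) = 0x00200000
- *	     | IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND
- *
- * The command bit is self-clearing, which is what both polling loops
- * wait on before proceeding.
- */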
-
-/******************************************************************************
- * Writes a word to a device over the Management Data Interface (MDI) bus.
- * This interface is used to manage Physical layer devices.
- *
- * hw - Struct containing variables accessed by hw code
- * reg_address - Offset of device register being written.
- * phy_address - Address of device on MDI.
- * device_type - Also known as the Device ID or DID.
- * data - 16-bit value to be written
- *
- * Returns: void.
- *
- * The 82597EX has support for several MDI access methods. This routine
- * uses the new protocol MDI Single Command and Address Operation.
- * This requires that first an address cycle command is sent, followed by a
- * write command.
- *****************************************************************************/
-static void
-ixgb_write_phy_reg(struct ixgb_hw *hw,
- u32 reg_address,
- u32 phy_address,
- u32 device_type,
- u16 data)
-{
- u32 i;
- u32 command = 0;
-
- ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
- ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
- ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
-
- /* Put the data in the MDIO Read/Write Data register */
- IXGB_WRITE_REG(hw, MSRWD, (u32)data);
-
- /* Setup and write the address cycle command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
- ** Check every 10 usec to see if the address cycle completed
- ** The COMMAND bit will clear when the operation is complete.
- ** This may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
-	for (i = 0; i < 10; i++) {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Address cycle complete, setup and write the write command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
-	** Check every 10 usec to see if the write command completed
- ** The COMMAND bit will clear when the operation is complete.
- ** The write may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
-	for (i = 0; i < 10; i++) {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Operation is complete, return. */
-}
-
-/******************************************************************************
- * Checks to see if the link status of the hardware has changed.
- *
- * hw - Struct containing variables accessed by hw code
- *
- * Called by any function that needs to check the link status of the adapter.
- *****************************************************************************/
-void
-ixgb_check_for_link(struct ixgb_hw *hw)
-{
- u32 status_reg;
- u32 xpcss_reg;
-
- ENTER();
-
- xpcss_reg = IXGB_READ_REG(hw, XPCSS);
- status_reg = IXGB_READ_REG(hw, STATUS);
-
- if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
- (status_reg & IXGB_STATUS_LU)) {
- hw->link_up = true;
- } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
- (status_reg & IXGB_STATUS_LU)) {
- pr_debug("XPCSS Not Aligned while Status:LU is set\n");
- hw->link_up = ixgb_link_reset(hw);
- } else {
- /*
- * 82597EX errata. Since the lane deskew problem may prevent
- * link, reset the link before reporting link down.
- */
- hw->link_up = ixgb_link_reset(hw);
- }
- /* Anything else for 10 Gig?? */
-}
-
-/******************************************************************************
- * Check for a bad link condition that may have occurred.
- * The indication is that the RFC / LFC registers may be incrementing
- * continually. A full adapter reset is required to recover.
- *
- * hw - Struct containing variables accessed by hw code
- *
- * Called by any function that needs to check the link status of the adapter.
- *****************************************************************************/
-bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
-{
- u32 newLFC, newRFC;
- bool bad_link_returncode = false;
-
- if (hw->phy_type == ixgb_phy_type_txn17401) {
- newLFC = IXGB_READ_REG(hw, LFC);
- newRFC = IXGB_READ_REG(hw, RFC);
-		if ((hw->lastLFC + 250 < newLFC) ||
-		    (hw->lastRFC + 250 < newRFC)) {
- pr_debug("BAD LINK! too many LFC/RFC since last check\n");
- bad_link_returncode = true;
- }
- hw->lastLFC = newLFC;
- hw->lastRFC = newRFC;
- }
-
- return bad_link_returncode;
-}
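-
-/* Illustrative sketch, not part of the original driver: the check above
- * compares this poll's fault counters against the snapshot taken on the
- * previous call and declares the link bad once either counter has
- * advanced by more than 250. faults_exceeded() is a hypothetical helper
- * restating that test.
- */
-static inline bool faults_exceeded(u32 last, u32 now)
-{
-	return last + 250 < now;	/* more than 250 faults since snapshot */
-}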
-
-/******************************************************************************
- * Clears all hardware statistics counters.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
-{
- ENTER();
-
- /* if we are stopped or resetting exit gracefully */
- if (hw->adapter_stopped) {
- pr_debug("Exiting because the adapter is stopped!!!\n");
- return;
- }
-
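-	/* These statistics registers are read-to-clear, so reading each
-	 * one (and discarding the value) resets it to zero.
-	 */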
- IXGB_READ_REG(hw, TPRL);
- IXGB_READ_REG(hw, TPRH);
- IXGB_READ_REG(hw, GPRCL);
- IXGB_READ_REG(hw, GPRCH);
- IXGB_READ_REG(hw, BPRCL);
- IXGB_READ_REG(hw, BPRCH);
- IXGB_READ_REG(hw, MPRCL);
- IXGB_READ_REG(hw, MPRCH);
- IXGB_READ_REG(hw, UPRCL);
- IXGB_READ_REG(hw, UPRCH);
- IXGB_READ_REG(hw, VPRCL);
- IXGB_READ_REG(hw, VPRCH);
- IXGB_READ_REG(hw, JPRCL);
- IXGB_READ_REG(hw, JPRCH);
- IXGB_READ_REG(hw, GORCL);
- IXGB_READ_REG(hw, GORCH);
- IXGB_READ_REG(hw, TORL);
- IXGB_READ_REG(hw, TORH);
- IXGB_READ_REG(hw, RNBC);
- IXGB_READ_REG(hw, RUC);
- IXGB_READ_REG(hw, ROC);
- IXGB_READ_REG(hw, RLEC);
- IXGB_READ_REG(hw, CRCERRS);
- IXGB_READ_REG(hw, ICBC);
- IXGB_READ_REG(hw, ECBC);
- IXGB_READ_REG(hw, MPC);
- IXGB_READ_REG(hw, TPTL);
- IXGB_READ_REG(hw, TPTH);
- IXGB_READ_REG(hw, GPTCL);
- IXGB_READ_REG(hw, GPTCH);
- IXGB_READ_REG(hw, BPTCL);
- IXGB_READ_REG(hw, BPTCH);
- IXGB_READ_REG(hw, MPTCL);
- IXGB_READ_REG(hw, MPTCH);
- IXGB_READ_REG(hw, UPTCL);
- IXGB_READ_REG(hw, UPTCH);
- IXGB_READ_REG(hw, VPTCL);
- IXGB_READ_REG(hw, VPTCH);
- IXGB_READ_REG(hw, JPTCL);
- IXGB_READ_REG(hw, JPTCH);
- IXGB_READ_REG(hw, GOTCL);
- IXGB_READ_REG(hw, GOTCH);
- IXGB_READ_REG(hw, TOTL);
- IXGB_READ_REG(hw, TOTH);
- IXGB_READ_REG(hw, DC);
- IXGB_READ_REG(hw, PLT64C);
- IXGB_READ_REG(hw, TSCTC);
- IXGB_READ_REG(hw, TSCTFC);
- IXGB_READ_REG(hw, IBIC);
- IXGB_READ_REG(hw, RFC);
- IXGB_READ_REG(hw, LFC);
- IXGB_READ_REG(hw, PFRC);
- IXGB_READ_REG(hw, PFTC);
- IXGB_READ_REG(hw, MCFRC);
- IXGB_READ_REG(hw, MCFTC);
- IXGB_READ_REG(hw, XONRXC);
- IXGB_READ_REG(hw, XONTXC);
- IXGB_READ_REG(hw, XOFFRXC);
- IXGB_READ_REG(hw, XOFFTXC);
- IXGB_READ_REG(hw, RJC);
-}
-
-/******************************************************************************
- * Turns on the software controllable LED
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-void
-ixgb_led_on(struct ixgb_hw *hw)
-{
- u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
-
- /* To turn on the LED, clear software-definable pin 0 (SDP0). */
- ctrl0_reg &= ~IXGB_CTRL0_SDP0;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
-}
-
-/******************************************************************************
- * Turns off the software controllable LED
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-void
-ixgb_led_off(struct ixgb_hw *hw)
-{
- u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
-
- /* To turn off the LED, set software-definable pin 0 (SDP0). */
- ctrl0_reg |= IXGB_CTRL0_SDP0;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
-}
-
-/******************************************************************************
- * Gets the current PCI bus type, speed, and width of the hardware
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_get_bus_info(struct ixgb_hw *hw)
-{
- u32 status_reg;
-
- status_reg = IXGB_READ_REG(hw, STATUS);
-
- hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
- ixgb_bus_type_pcix : ixgb_bus_type_pci;
-
- if (hw->bus.type == ixgb_bus_type_pci) {
- hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
- ixgb_bus_speed_66 : ixgb_bus_speed_33;
- } else {
- switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
- case IXGB_STATUS_PCIX_SPD_66:
- hw->bus.speed = ixgb_bus_speed_66;
- break;
- case IXGB_STATUS_PCIX_SPD_100:
- hw->bus.speed = ixgb_bus_speed_100;
- break;
- case IXGB_STATUS_PCIX_SPD_133:
- hw->bus.speed = ixgb_bus_speed_133;
- break;
- default:
- hw->bus.speed = ixgb_bus_speed_reserved;
- break;
- }
- }
-
- hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
- ixgb_bus_width_64 : ixgb_bus_width_32;
-}
-
-/******************************************************************************
- * Tests a MAC address to ensure it is a valid Individual Address
- *
- * mac_addr - pointer to MAC address.
- *
- *****************************************************************************/
-static bool
-mac_addr_valid(u8 *mac_addr)
-{
-	bool is_valid = true;
-
-	ENTER();
-
- /* Make sure it is not a multicast address */
- if (is_multicast_ether_addr(mac_addr)) {
- pr_debug("MAC address is multicast\n");
- is_valid = false;
- }
- /* Not a broadcast address */
- else if (is_broadcast_ether_addr(mac_addr)) {
- pr_debug("MAC address is broadcast\n");
- is_valid = false;
- }
- /* Reject the zero address */
- else if (is_zero_ether_addr(mac_addr)) {
- pr_debug("MAC address is all zeros\n");
- is_valid = false;
- }
- return is_valid;
-}
-
-/******************************************************************************
- * Resets the 10GbE link. Waits the settle time and returns the state of
- * the link.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static bool
-ixgb_link_reset(struct ixgb_hw *hw)
-{
- bool link_status = false;
- u8 wait_retries = MAX_RESET_ITERATIONS;
- u8 lrst_retries = MAX_RESET_ITERATIONS;
-
- do {
- /* Reset the link */
- IXGB_WRITE_REG(hw, CTRL0,
- IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
-
- /* Wait for link-up and lane re-alignment */
- do {
- udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
-			link_status =
-				(IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) &&
-				(IXGB_READ_REG(hw, XPCSS) &
-				 IXGB_XPCSS_ALIGN_STATUS);
- } while (!link_status && --wait_retries);
-
- } while (!link_status && --lrst_retries);
-
- return link_status;
-}
-
-/******************************************************************************
- * Resets the 10GbE optics module.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_optics_reset(struct ixgb_hw *hw)
-{
- if (hw->phy_type == ixgb_phy_type_txn17401) {
- ixgb_write_phy_reg(hw,
- MDIO_CTRL1,
- IXGB_PHY_ADDRESS,
- MDIO_MMD_PMAPMD,
- MDIO_CTRL1_RESET);
-
- ixgb_read_phy_reg(hw, MDIO_CTRL1, IXGB_PHY_ADDRESS, MDIO_MMD_PMAPMD);
- }
-}
-
-/******************************************************************************
- * Resets the 10GbE optics module for Sun variant NIC.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-
-#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803
-#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164
-#define IXGB_BCM8704_USER_CTRL_REG 0xC800
-#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF
-#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003
-#define IXGB_SUN_PHY_ADDRESS 0x0000
-#define IXGB_SUN_PHY_RESET_DELAY 305
-
-static void
-ixgb_optics_reset_bcm(struct ixgb_hw *hw)
-{
-	u32 ctrl = IXGB_READ_REG(hw, CTRL0);
-
-	ctrl &= ~IXGB_CTRL0_SDP2;
- ctrl |= IXGB_CTRL0_SDP3;
- IXGB_WRITE_REG(hw, CTRL0, ctrl);
- IXGB_WRITE_FLUSH(hw);
-
- /* SerDes needs extra delay */
- msleep(IXGB_SUN_PHY_RESET_DELAY);
-
- /* Broadcom 7408L configuration */
- /* Reference clock config */
- ixgb_write_phy_reg(hw,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL);
- /* we must read the registers twice */
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
-
- ixgb_write_phy_reg(hw,
- IXGB_BCM8704_USER_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR,
- IXGB_BCM8704_USER_CTRL_REG_VAL);
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
-
- /* SerDes needs extra delay */
- msleep(IXGB_SUN_PHY_RESET_DELAY);
-}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
deleted file mode 100644
index 70bcff5fb3db..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ /dev/null
@@ -1,767 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_HW_H_
-#define _IXGB_HW_H_
-
-#include <linux/mdio.h>
-
-#include "ixgb_osdep.h"
-
-/* Enums */
-typedef enum {
- ixgb_mac_unknown = 0,
- ixgb_82597,
- ixgb_num_macs
-} ixgb_mac_type;
-
-/* Types of physical layer modules */
-typedef enum {
- ixgb_phy_type_unknown = 0,
- ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
- ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
- ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
- ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */
- ixgb_phy_type_bcm /* SUN specific board */
-} ixgb_phy_type;
-
-/* XPAK transceiver vendors, for the SR adapters */
-typedef enum {
- ixgb_xpak_vendor_intel,
- ixgb_xpak_vendor_infineon
-} ixgb_xpak_vendor;
-
-/* Media Types */
-typedef enum {
- ixgb_media_type_unknown = 0,
- ixgb_media_type_fiber = 1,
- ixgb_media_type_copper = 2,
- ixgb_num_media_types
-} ixgb_media_type;
-
-/* Flow Control Settings */
-typedef enum {
- ixgb_fc_none = 0,
- ixgb_fc_rx_pause = 1,
- ixgb_fc_tx_pause = 2,
- ixgb_fc_full = 3,
- ixgb_fc_default = 0xFF
-} ixgb_fc_type;
-
-/* PCI bus types */
-typedef enum {
- ixgb_bus_type_unknown = 0,
- ixgb_bus_type_pci,
- ixgb_bus_type_pcix
-} ixgb_bus_type;
-
-/* PCI bus speeds */
-typedef enum {
- ixgb_bus_speed_unknown = 0,
- ixgb_bus_speed_33,
- ixgb_bus_speed_66,
- ixgb_bus_speed_100,
- ixgb_bus_speed_133,
- ixgb_bus_speed_reserved
-} ixgb_bus_speed;
-
-/* PCI bus widths */
-typedef enum {
- ixgb_bus_width_unknown = 0,
- ixgb_bus_width_32,
- ixgb_bus_width_64
-} ixgb_bus_width;
-
-#define IXGB_EEPROM_SIZE 64 /* Size in words */
-
-#define SPEED_10000 10000
-#define FULL_DUPLEX 2
-
-#define MIN_NUMBER_OF_DESCRIPTORS 8
-#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */
-
-#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */
-#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
-#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */
-
-#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */
- /* NOTE: this is MICROSECONDS */
-#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */
-
-/* General Registers */
-#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
-#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
-#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
-#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
-#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
-
-/* Interrupt */
-#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
-#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
-#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
-#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
-
-/* Receive */
-#define IXGB_RCTL 0x00100 /* RX Control - RW */
-#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
-#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
-#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
-#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
-#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
-#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
-#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
-#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
-#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
-#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */
-#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
-#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
-#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
-#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
-#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
-#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
-#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8
-
-/* Transmit */
-#define IXGB_TCTL 0x00600 /* TX Control - RW */
-#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
-#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
-#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
-#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
-#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
-#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
-#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
-#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
-#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
-#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8
-
-/* Physical */
-#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
-#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
-#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
-#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
-#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */
-#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
-#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
-#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
-#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
-#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
-#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
-#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
-#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
-
-/* Wake-up */
-#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
-#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
-#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
-#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
-#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
-
-/* Statistics */
-#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
-#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
-#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
-#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
-#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
-#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
-#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
-#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
-#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
-#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
-#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
-#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
-#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
-#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
-#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
-#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
-#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
-#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
-#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
-#define IXGB_RUC 0x02050 /* Receive Undersize Count */
-#define IXGB_ROC 0x02058 /* Receive Oversize Count */
-#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
-#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
-#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
-#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
-#define IXGB_MPC 0x02080 /* Missed Packets Count */
-#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
-#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
-#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
-#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
-#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
-#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
-#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
-#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
-#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
-#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
-#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
-#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
-#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
-#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
-#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
-#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
-#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
-#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
-#define IXGB_DC 0x02148 /* Defer Count */
-#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */
-#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
-#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
-#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
-#define IXGB_RFC 0x02188 /* Remote Fault Count */
-#define IXGB_LFC 0x02190 /* Local Fault Count */
-#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
-#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
-#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */
-#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */
-#define IXGB_XONRXC 0x021B8 /* XON Received Count */
-#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
-#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
-#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
-#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
-
-/* CTRL0 Bit Masks */
-#define IXGB_CTRL0_LRST 0x00000008
-#define IXGB_CTRL0_JFE 0x00000010
-#define IXGB_CTRL0_XLE 0x00000020
-#define IXGB_CTRL0_MDCS 0x00000040
-#define IXGB_CTRL0_CMDC 0x00000080
-#define IXGB_CTRL0_SDP0 0x00040000
-#define IXGB_CTRL0_SDP1 0x00080000
-#define IXGB_CTRL0_SDP2 0x00100000
-#define IXGB_CTRL0_SDP3 0x00200000
-#define IXGB_CTRL0_SDP0_DIR 0x00400000
-#define IXGB_CTRL0_SDP1_DIR 0x00800000
-#define IXGB_CTRL0_SDP2_DIR 0x01000000
-#define IXGB_CTRL0_SDP3_DIR 0x02000000
-#define IXGB_CTRL0_RST 0x04000000
-#define IXGB_CTRL0_RPE 0x08000000
-#define IXGB_CTRL0_TPE 0x10000000
-#define IXGB_CTRL0_VME 0x40000000
-
-/* CTRL1 Bit Masks */
-#define IXGB_CTRL1_GPI0_EN 0x00000001
-#define IXGB_CTRL1_GPI1_EN 0x00000002
-#define IXGB_CTRL1_GPI2_EN 0x00000004
-#define IXGB_CTRL1_GPI3_EN 0x00000008
-#define IXGB_CTRL1_SDP4 0x00000010
-#define IXGB_CTRL1_SDP5 0x00000020
-#define IXGB_CTRL1_SDP6 0x00000040
-#define IXGB_CTRL1_SDP7 0x00000080
-#define IXGB_CTRL1_SDP4_DIR 0x00000100
-#define IXGB_CTRL1_SDP5_DIR 0x00000200
-#define IXGB_CTRL1_SDP6_DIR 0x00000400
-#define IXGB_CTRL1_SDP7_DIR 0x00000800
-#define IXGB_CTRL1_EE_RST 0x00002000
-#define IXGB_CTRL1_RO_DIS 0x00020000
-#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000
-#define IXGB_CTRL1_PCIXHM_1_2 0x00000000
-#define IXGB_CTRL1_PCIXHM_5_8 0x00400000
-#define IXGB_CTRL1_PCIXHM_3_4 0x00800000
-#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000
-
-/* STATUS Bit Masks */
-#define IXGB_STATUS_LU 0x00000002
-#define IXGB_STATUS_AIP 0x00000004
-#define IXGB_STATUS_TXOFF 0x00000010
-#define IXGB_STATUS_XAUIME 0x00000020
-#define IXGB_STATUS_RES 0x00000040
-#define IXGB_STATUS_RIS 0x00000080
-#define IXGB_STATUS_RIE 0x00000100
-#define IXGB_STATUS_RLF 0x00000200
-#define IXGB_STATUS_RRF 0x00000400
-#define IXGB_STATUS_PCI_SPD 0x00000800
-#define IXGB_STATUS_BUS64 0x00001000
-#define IXGB_STATUS_PCIX_MODE 0x00002000
-#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000
-#define IXGB_STATUS_PCIX_SPD_66 0x00000000
-#define IXGB_STATUS_PCIX_SPD_100 0x00004000
-#define IXGB_STATUS_PCIX_SPD_133 0x00008000
-#define IXGB_STATUS_REV_ID_MASK 0x000F0000
-#define IXGB_STATUS_REV_ID_SHIFT 16
-
-/* EECD Bit Masks */
-#define IXGB_EECD_SK 0x00000001
-#define IXGB_EECD_CS 0x00000002
-#define IXGB_EECD_DI 0x00000004
-#define IXGB_EECD_DO 0x00000008
-#define IXGB_EECD_FWE_MASK 0x00000030
-#define IXGB_EECD_FWE_DIS 0x00000010
-#define IXGB_EECD_FWE_EN 0x00000020
-
-/* MFS */
-#define IXGB_MFS_SHIFT 16
-
-/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */
-#define IXGB_INT_TXDW 0x00000001
-#define IXGB_INT_TXQE 0x00000002
-#define IXGB_INT_LSC 0x00000004
-#define IXGB_INT_RXSEQ 0x00000008
-#define IXGB_INT_RXDMT0 0x00000010
-#define IXGB_INT_RXO 0x00000040
-#define IXGB_INT_RXT0 0x00000080
-#define IXGB_INT_AUTOSCAN 0x00000200
-#define IXGB_INT_GPI0 0x00000800
-#define IXGB_INT_GPI1 0x00001000
-#define IXGB_INT_GPI2 0x00002000
-#define IXGB_INT_GPI3 0x00004000
-
-/* RCTL Bit Masks */
-#define IXGB_RCTL_RXEN 0x00000002
-#define IXGB_RCTL_SBP 0x00000004
-#define IXGB_RCTL_UPE 0x00000008
-#define IXGB_RCTL_MPE 0x00000010
-#define IXGB_RCTL_RDMTS_MASK 0x00000300
-#define IXGB_RCTL_RDMTS_1_2 0x00000000
-#define IXGB_RCTL_RDMTS_1_4 0x00000100
-#define IXGB_RCTL_RDMTS_1_8 0x00000200
-#define IXGB_RCTL_MO_MASK 0x00003000
-#define IXGB_RCTL_MO_47_36 0x00000000
-#define IXGB_RCTL_MO_46_35 0x00001000
-#define IXGB_RCTL_MO_45_34 0x00002000
-#define IXGB_RCTL_MO_43_32 0x00003000
-#define IXGB_RCTL_MO_SHIFT 12
-#define IXGB_RCTL_BAM 0x00008000
-#define IXGB_RCTL_BSIZE_MASK 0x00030000
-#define IXGB_RCTL_BSIZE_2048 0x00000000
-#define IXGB_RCTL_BSIZE_4096 0x00010000
-#define IXGB_RCTL_BSIZE_8192 0x00020000
-#define IXGB_RCTL_BSIZE_16384 0x00030000
-#define IXGB_RCTL_VFE 0x00040000
-#define IXGB_RCTL_CFIEN 0x00080000
-#define IXGB_RCTL_CFI 0x00100000
-#define IXGB_RCTL_RPDA_MASK 0x00600000
-#define IXGB_RCTL_RPDA_MC_MAC 0x00000000
-#define IXGB_RCTL_MC_ONLY 0x00400000
-#define IXGB_RCTL_CFF 0x00800000
-#define IXGB_RCTL_SECRC 0x04000000
-#define IXGB_RDT_FPDB 0x80000000
-
-#define IXGB_RCTL_IDLE_RX_UNIT 0
-
-/* FCRTL Bit Masks */
-#define IXGB_FCRTL_XONE 0x80000000
-
-/* RXDCTL Bit Masks */
-#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF
-#define IXGB_RXDCTL_PTHRESH_SHIFT 0
-#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00
-#define IXGB_RXDCTL_HTHRESH_SHIFT 9
-#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000
-#define IXGB_RXDCTL_WTHRESH_SHIFT 18
-
-/* RAIDC Bit Masks */
-#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F
-#define IXGB_RAIDC_DELAY_MASK 0x000FF800
-#define IXGB_RAIDC_DELAY_SHIFT 11
-#define IXGB_RAIDC_POLL_MASK 0x1FF00000
-#define IXGB_RAIDC_POLL_SHIFT 20
-#define IXGB_RAIDC_RXT_GATE 0x40000000
-#define IXGB_RAIDC_EN 0x80000000
-
-#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220
-#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244
-#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122
-#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61
-
-/* RXCSUM Bit Masks */
-#define IXGB_RXCSUM_IPOFL 0x00000100
-#define IXGB_RXCSUM_TUOFL 0x00000200
-
-/* RAH Bit Masks */
-#define IXGB_RAH_ASEL_MASK 0x00030000
-#define IXGB_RAH_ASEL_DEST 0x00000000
-#define IXGB_RAH_ASEL_SRC 0x00010000
-#define IXGB_RAH_AV 0x80000000
-
-/* TCTL Bit Masks */
-#define IXGB_TCTL_TCE 0x00000001
-#define IXGB_TCTL_TXEN 0x00000002
-#define IXGB_TCTL_TPDE 0x00000004
-
-#define IXGB_TCTL_IDLE_TX_UNIT 0
-
-/* TXDCTL Bit Masks */
-#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F
-#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00
-#define IXGB_TXDCTL_HTHRESH_SHIFT 8
-#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000
-#define IXGB_TXDCTL_WTHRESH_SHIFT 16
-
-/* TSPMT Bit Masks */
-#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF
-#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000
-#define IXGB_TSPMT_TSPBP_SHIFT 16
-
-/* PAP Bit Masks */
-#define IXGB_PAP_TXPC_MASK 0x0000FFFF
-#define IXGB_PAP_TXPV_MASK 0x000F0000
-#define IXGB_PAP_TXPV_10G 0x00000000
-#define IXGB_PAP_TXPV_1G 0x00010000
-#define IXGB_PAP_TXPV_2G 0x00020000
-#define IXGB_PAP_TXPV_3G 0x00030000
-#define IXGB_PAP_TXPV_4G 0x00040000
-#define IXGB_PAP_TXPV_5G 0x00050000
-#define IXGB_PAP_TXPV_6G 0x00060000
-#define IXGB_PAP_TXPV_7G 0x00070000
-#define IXGB_PAP_TXPV_8G 0x00080000
-#define IXGB_PAP_TXPV_9G 0x00090000
-#define IXGB_PAP_TXPV_WAN 0x000F0000
-
-/* PCSC1 Bit Masks */
-#define IXGB_PCSC1_LOOPBACK 0x00004000
-
-/* PCSC2 Bit Masks */
-#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003
-#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001
-
-/* PCSS1 Bit Masks */
-#define IXGB_PCSS1_LOCAL_FAULT 0x00000080
-#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004
-
-/* PCSS2 Bit Masks */
-#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000
-#define IXGB_PCSS2_DEV_PRES 0x00004000
-#define IXGB_PCSS2_TX_LF 0x00000800
-#define IXGB_PCSS2_RX_LF 0x00000400
-#define IXGB_PCSS2_10GBW 0x00000004
-#define IXGB_PCSS2_10GBX 0x00000002
-#define IXGB_PCSS2_10GBR 0x00000001
-
-/* XPCSS Bit Masks */
-#define IXGB_XPCSS_ALIGN_STATUS 0x00001000
-#define IXGB_XPCSS_PATTERN_TEST 0x00000800
-#define IXGB_XPCSS_LANE_3_SYNC 0x00000008
-#define IXGB_XPCSS_LANE_2_SYNC 0x00000004
-#define IXGB_XPCSS_LANE_1_SYNC 0x00000002
-#define IXGB_XPCSS_LANE_0_SYNC 0x00000001
-
-/* XPCSTC Bit Masks */
-#define IXGB_XPCSTC_BERT_TRIG 0x00200000
-#define IXGB_XPCSTC_BERT_SST 0x00100000
-#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000
-#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17
-#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003
-#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001
-#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000
-
-/* MSCA bit Masks */
-/* New Protocol Address */
-#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF
-#define IXGB_MSCA_NP_ADDR_SHIFT 0
-/* Either Device Type or Register Address, depending on ST_CODE */
-#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000
-#define IXGB_MSCA_DEV_TYPE_SHIFT 16
-#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000
-#define IXGB_MSCA_PHY_ADDR_SHIFT 21
-#define IXGB_MSCA_OP_CODE_MASK 0x0C000000
-/* OP_CODE == 00, Address cycle, New Protocol */
-/* OP_CODE == 01, Write operation */
-/* OP_CODE == 10, Read operation */
-/* OP_CODE == 11, Read, auto increment, New Protocol */
-#define IXGB_MSCA_ADDR_CYCLE 0x00000000
-#define IXGB_MSCA_WRITE 0x04000000
-#define IXGB_MSCA_READ 0x08000000
-#define IXGB_MSCA_READ_AUTOINC 0x0C000000
-#define IXGB_MSCA_OP_CODE_SHIFT 26
-#define IXGB_MSCA_ST_CODE_MASK 0x30000000
-/* ST_CODE == 00, New Protocol */
-/* ST_CODE == 01, Old Protocol */
-#define IXGB_MSCA_NEW_PROTOCOL 0x00000000
-#define IXGB_MSCA_OLD_PROTOCOL 0x10000000
-#define IXGB_MSCA_ST_CODE_SHIFT 28
-/* Initiate command, self-clearing when command completes */
-#define IXGB_MSCA_MDI_COMMAND 0x40000000
-/* MDI In Progress Enable. */
-#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000
-
-/* MSRWD bit masks */
-#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF
-#define IXGB_MSRWD_WRITE_DATA_SHIFT 0
-#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000
-#define IXGB_MSRWD_READ_DATA_SHIFT 16
-
-/* Definitions for the optics devices on the MDIO bus. */
-#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
-
-#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
-
-/* Vendor-specific MDIO registers */
-#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */
-#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */
-
-#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80
-#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00
-#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */
-
-/* Layout of a single receive descriptor. The controller assumes that this
- * structure is packed into 16 bytes, which is a safe assumption with most
- * compilers. However, some compilers may insert padding between the fields,
- * in which case the structure must be packed in some compiler-specific
- * manner. */
-struct ixgb_rx_desc {
- __le64 buff_addr;
- __le16 length;
- __le16 reserved;
- u8 status;
- u8 errors;
- __le16 special;
-};
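
The hardware depends on this 16-byte layout, so a compile-time size check makes the packing assumption explicit. A sketch using C11 _Static_assert for illustration (the kernel itself would express this as a BUILD_BUG_ON):

    /* Illustrative only: fail the build if padding breaks the layout. */
    _Static_assert(sizeof(struct ixgb_rx_desc) == 16,
                   "ixgb_rx_desc must pack to exactly 16 bytes");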
-
-#define IXGB_RX_DESC_STATUS_DD 0x01
-#define IXGB_RX_DESC_STATUS_EOP 0x02
-#define IXGB_RX_DESC_STATUS_IXSM 0x04
-#define IXGB_RX_DESC_STATUS_VP 0x08
-#define IXGB_RX_DESC_STATUS_TCPCS 0x20
-#define IXGB_RX_DESC_STATUS_IPCS 0x40
-#define IXGB_RX_DESC_STATUS_PIF 0x80
-
-#define IXGB_RX_DESC_ERRORS_CE 0x01
-#define IXGB_RX_DESC_ERRORS_SE 0x02
-#define IXGB_RX_DESC_ERRORS_P 0x08
-#define IXGB_RX_DESC_ERRORS_TCPE 0x20
-#define IXGB_RX_DESC_ERRORS_IPE 0x40
-#define IXGB_RX_DESC_ERRORS_RXE 0x80
-
-#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
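
For illustration, the "special" field decodes with the three defines above; these helpers are hypothetical, not part of the driver:

    static inline u16 ixgb_rx_vlan_id(__le16 special)
    {
            return le16_to_cpu(special) & IXGB_RX_DESC_SPECIAL_VLAN_MASK;
    }

    static inline u8 ixgb_rx_vlan_prio(__le16 special)
    {
            return (le16_to_cpu(special) & IXGB_RX_DESC_SPECIAL_PRI_MASK)
                    >> IXGB_RX_DESC_SPECIAL_PRI_SHIFT;
    }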
-
-/* Layout of a single transmit descriptor. The controller assumes that this
- * structure is packed into 16 bytes, which is a safe assumption with most
- * compilers. However, some compilers may insert padding between the fields,
- * in which case the structure must be packed in some compiler-specific
- * manner. */
-struct ixgb_tx_desc {
- __le64 buff_addr;
- __le32 cmd_type_len;
- u8 status;
- u8 popts;
- __le16 vlan;
-};
-
-#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF
-#define IXGB_TX_DESC_TYPE_MASK 0x00F00000
-#define IXGB_TX_DESC_TYPE_SHIFT 20
-#define IXGB_TX_DESC_CMD_MASK 0xFF000000
-#define IXGB_TX_DESC_CMD_SHIFT 24
-#define IXGB_TX_DESC_CMD_EOP 0x01000000
-#define IXGB_TX_DESC_CMD_TSE 0x04000000
-#define IXGB_TX_DESC_CMD_RS 0x08000000
-#define IXGB_TX_DESC_CMD_VLE 0x40000000
-#define IXGB_TX_DESC_CMD_IDE 0x80000000
-
-#define IXGB_TX_DESC_TYPE 0x00100000
-
-#define IXGB_TX_DESC_STATUS_DD 0x01
-
-#define IXGB_TX_DESC_POPTS_IXSM 0x01
-#define IXGB_TX_DESC_POPTS_TXSM 0x02
-#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
-
-struct ixgb_context_desc {
- u8 ipcss;
- u8 ipcso;
- __le16 ipcse;
- u8 tucss;
- u8 tucso;
- __le16 tucse;
- __le32 cmd_type_len;
- u8 status;
- u8 hdr_len;
- __le16 mss;
-};
-
-#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000
-#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000
-#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000
-#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000
-#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000
-
-#define IXGB_CONTEXT_DESC_TYPE 0x00000000
-
-#define IXGB_CONTEXT_DESC_STATUS_DD 0x01
-
-/* Filters */
-#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
-#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
-
-#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
-#define ENET_HEADER_SIZE 14
-#define ENET_FCS_LENGTH 4
-#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
-#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
-#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
-#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
-
-/* Phy Addresses */
-#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
-#define IXGB_XAUII_PHY_ADDR	0x1	/* XAUI transceiver phy address */
-#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
-
-/* This structure takes a 64k flash and maps it for identification commands */
-struct ixgb_flash_buffer {
- u8 manufacturer_id;
- u8 device_id;
- u8 filler1[0x2AA8];
- u8 cmd2;
- u8 filler2[0x2AAA];
- u8 cmd1;
- u8 filler3[0xAAAA];
-};
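
The filler sizes place the command bytes on the classic JEDEC unlock addresses: cmd2 lands at offset 2 + 0x2AA8 = 0x2AAA, cmd1 at 0x2AAB + 0x2AAA = 0x5555, and filler3 pads the structure out to the full 64 KB (0x5556 + 0xAAAA = 0x10000). A compile-time check of that arithmetic (illustrative, not in the driver):

    #include <stddef.h>

    _Static_assert(offsetof(struct ixgb_flash_buffer, cmd2) == 0x2AAA,
                   "cmd2 must sit at JEDEC address 0x2AAA");
    _Static_assert(offsetof(struct ixgb_flash_buffer, cmd1) == 0x5555,
                   "cmd1 must sit at JEDEC address 0x5555");
    _Static_assert(sizeof(struct ixgb_flash_buffer) == 0x10000,
                   "the structure must map the whole 64 KB flash");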
-
-/* Flow control parameters */
-struct ixgb_fc {
- u32 high_water; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
- u16 pause_time; /* Flow Control Pause timer */
- bool send_xon; /* Flow control send XON */
- ixgb_fc_type type; /* Type of flow control */
-};
-
-/* The historical defaults for the flow control values are given below. */
-#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
-#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
-#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
-
-/* Phy definitions */
-#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF
-#define IXGB_MAX_PHY_ADDRESS 31
-#define IXGB_MAX_PHY_DEV_TYPE 31
-
-/* Bus parameters */
-struct ixgb_bus {
- ixgb_bus_speed speed;
- ixgb_bus_width width;
- ixgb_bus_type type;
-};
-
-struct ixgb_hw {
- u8 __iomem *hw_addr;/* Base Address of the hardware */
- void *back; /* Pointer to OS-dependent struct */
- struct ixgb_fc fc; /* Flow control parameters */
- struct ixgb_bus bus; /* Bus parameters */
- u32 phy_id; /* Phy Identifier */
- u32 phy_addr; /* XGMII address of Phy */
- ixgb_mac_type mac_type; /* Identifier for MAC controller */
- ixgb_phy_type phy_type; /* Transceiver/phy identifier */
- u32 max_frame_size; /* Maximum frame size supported */
- u32 mc_filter_type; /* Multicast filter hash type */
- u32 num_mc_addrs; /* Number of current Multicast addrs */
- u8 curr_mac_addr[ETH_ALEN]; /* Individual address currently programmed in MAC */
- u32 num_tx_desc; /* Number of Transmit descriptors */
- u32 num_rx_desc; /* Number of Receive descriptors */
- u32 rx_buffer_size; /* Size of Receive buffer */
- bool link_up; /* true if link is valid */
- bool adapter_stopped; /* State of adapter */
- u16 device_id; /* device id from PCI configuration space */
- u16 vendor_id; /* vendor id from PCI configuration space */
- u8 revision_id; /* revision id from PCI configuration space */
- u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
- u16 subsystem_id; /* subsystem id from PCI configuration space */
- u32 bar0; /* Base Address registers */
- u32 bar1;
- u32 bar2;
- u32 bar3;
- u16 pci_cmd_word; /* PCI command register id from PCI configuration space */
- __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
- unsigned long io_base; /* Our I/O mapped location */
- u32 lastLFC;
- u32 lastRFC;
-};
-
-/* Statistics reported by the hardware */
-struct ixgb_hw_stats {
- u64 tprl;
- u64 tprh;
- u64 gprcl;
- u64 gprch;
- u64 bprcl;
- u64 bprch;
- u64 mprcl;
- u64 mprch;
- u64 uprcl;
- u64 uprch;
- u64 vprcl;
- u64 vprch;
- u64 jprcl;
- u64 jprch;
- u64 gorcl;
- u64 gorch;
- u64 torl;
- u64 torh;
- u64 rnbc;
- u64 ruc;
- u64 roc;
- u64 rlec;
- u64 crcerrs;
- u64 icbc;
- u64 ecbc;
- u64 mpc;
- u64 tptl;
- u64 tpth;
- u64 gptcl;
- u64 gptch;
- u64 bptcl;
- u64 bptch;
- u64 mptcl;
- u64 mptch;
- u64 uptcl;
- u64 uptch;
- u64 vptcl;
- u64 vptch;
- u64 jptcl;
- u64 jptch;
- u64 gotcl;
- u64 gotch;
- u64 totl;
- u64 toth;
- u64 dc;
- u64 plt64c;
- u64 tsctc;
- u64 tsctfc;
- u64 ibic;
- u64 rfc;
- u64 lfc;
- u64 pfrc;
- u64 pftc;
- u64 mcfrc;
- u64 mcftc;
- u64 xonrxc;
- u64 xontxc;
- u64 xoffrxc;
- u64 xofftxc;
- u64 rjc;
-};
-
-/* Function Prototypes */
-bool ixgb_adapter_stop(struct ixgb_hw *hw);
-bool ixgb_init_hw(struct ixgb_hw *hw);
-bool ixgb_adapter_start(struct ixgb_hw *hw);
-void ixgb_check_for_link(struct ixgb_hw *hw);
-bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index);
-
-/* Filters (multicast, vlan, receive) */
-void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, u32 pad);
-
-/* Vfta functions */
-void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
-
-/* Access functions to eeprom data */
-void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
-u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
-u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
-bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
-__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
-
-/* Everything else */
-void ixgb_led_on(struct ixgb_hw *hw);
-void ixgb_led_off(struct ixgb_hw *hw);
-void ixgb_write_pci_cfg(struct ixgb_hw *hw,
- u32 reg,
- u16 * value);
-
-
-#endif /* _IXGB_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
deleted file mode 100644
index 9695b8215f01..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_IDS_H_
-#define _IXGB_IDS_H_
-
-/**********************************************************************
-** The Device and Vendor IDs for 10 Gigabit MACs
-**********************************************************************/
-
-#define IXGB_DEVICE_ID_82597EX 0x1048
-#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
-#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
-#define IXGB_SUBDEVICE_ID_A11F 0xA11F
-#define IXGB_SUBDEVICE_ID_A01F 0xA01F
-
-#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
-#define IXGB_SUBDEVICE_ID_A00C 0xA00C
-#define IXGB_SUBDEVICE_ID_A01C 0xA01C
-#define IXGB_SUBDEVICE_ID_7036 0x7036
-
-#endif /* #ifndef _IXGB_IDS_H_ */
-/* End of File */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
deleted file mode 100644
index b4d47e7a76c8..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ /dev/null
@@ -1,2285 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/prefetch.h>
-#include "ixgb.h"
-
-char ixgb_driver_name[] = "ixgb";
-static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
-
-static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
-
-#define IXGB_CB_LENGTH 256
-static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
-module_param(copybreak, uint, 0644);
-MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
-
-/* ixgb_pci_tbl - PCI Device ID Table
- *
- * Wildcard entries (PCI_ANY_ID) should come last
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- * Class, Class Mask, private data (not used) }
- */
-static const struct pci_device_id ixgb_pci_tbl[] = {
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-
- /* required last entry */
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
-
-/* Local Function Prototypes */
-static int ixgb_init_module(void);
-static void ixgb_exit_module(void);
-static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void ixgb_remove(struct pci_dev *pdev);
-static int ixgb_sw_init(struct ixgb_adapter *adapter);
-static int ixgb_open(struct net_device *netdev);
-static int ixgb_close(struct net_device *netdev);
-static void ixgb_configure_tx(struct ixgb_adapter *adapter);
-static void ixgb_configure_rx(struct ixgb_adapter *adapter);
-static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
-static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
-static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
-static void ixgb_set_multi(struct net_device *netdev);
-static void ixgb_watchdog(struct timer_list *t);
-static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev);
-static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
-static int ixgb_set_mac(struct net_device *netdev, void *p);
-static irqreturn_t ixgb_intr(int irq, void *data);
-static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
-
-static int ixgb_clean(struct napi_struct *, int);
-static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
-static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
-
-static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void ixgb_tx_timeout_task(struct work_struct *work);
-
-static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
- __be16 proto, u16 vid);
-static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
- __be16 proto, u16 vid);
-static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
-
-static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
- pci_channel_state_t state);
-static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
-static void ixgb_io_resume (struct pci_dev *pdev);
-
-static const struct pci_error_handlers ixgb_err_handler = {
- .error_detected = ixgb_io_error_detected,
- .slot_reset = ixgb_io_slot_reset,
- .resume = ixgb_io_resume,
-};
-
-static struct pci_driver ixgb_driver = {
- .name = ixgb_driver_name,
- .id_table = ixgb_pci_tbl,
- .probe = ixgb_probe,
- .remove = ixgb_remove,
- .err_handler = &ixgb_err_handler
-};
-
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
-MODULE_LICENSE("GPL v2");
-
-#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-/**
- * ixgb_init_module - Driver Registration Routine
- *
- * ixgb_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- **/
-
-static int __init
-ixgb_init_module(void)
-{
- pr_info("%s\n", ixgb_driver_string);
- pr_info("%s\n", ixgb_copyright);
-
- return pci_register_driver(&ixgb_driver);
-}
-
-module_init(ixgb_init_module);
-
-/**
- * ixgb_exit_module - Driver Exit Cleanup Routine
- *
- * ixgb_exit_module is called just before the driver is removed
- * from memory.
- **/
-
-static void __exit
-ixgb_exit_module(void)
-{
- pci_unregister_driver(&ixgb_driver);
-}
-
-module_exit(ixgb_exit_module);
-
-/**
- * ixgb_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- **/
-
-static void
-ixgb_irq_disable(struct ixgb_adapter *adapter)
-{
- IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
- IXGB_WRITE_FLUSH(&adapter->hw);
- synchronize_irq(adapter->pdev->irq);
-}
-
-/**
- * ixgb_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-
-static void
-ixgb_irq_enable(struct ixgb_adapter *adapter)
-{
- u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
- IXGB_INT_TXDW | IXGB_INT_LSC;
- if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
- val |= IXGB_INT_GPI0;
- IXGB_WRITE_REG(&adapter->hw, IMS, val);
- IXGB_WRITE_FLUSH(&adapter->hw);
-}
-
-int
-ixgb_up(struct ixgb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int err, irq_flags = IRQF_SHARED;
- int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
- struct ixgb_hw *hw = &adapter->hw;
-
- /* hardware has been reset, we need to reload some things */
-
- ixgb_rar_set(hw, netdev->dev_addr, 0);
- ixgb_set_multi(netdev);
-
- ixgb_restore_vlan(adapter);
-
- ixgb_configure_tx(adapter);
- ixgb_setup_rctl(adapter);
- ixgb_configure_rx(adapter);
- ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
-
- /* disable interrupts and get the hardware into a known state */
- IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
-
- /* only enable MSI if bus is in PCI-X mode */
- if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->have_msi = true;
- irq_flags = 0;
- }
- /* proceed to try to request regular interrupt */
- }
-
- err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
- netdev->name, netdev);
- if (err) {
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
- netif_err(adapter, probe, adapter->netdev,
-			  "Unable to allocate interrupt, Error: %d\n", err);
- return err;
- }
-
- if ((hw->max_frame_size != max_frame) ||
- (hw->max_frame_size !=
- (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
-
- hw->max_frame_size = max_frame;
-
- IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
-
- if (hw->max_frame_size >
- IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
- u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
-
- if (!(ctrl0 & IXGB_CTRL0_JFE)) {
- ctrl0 |= IXGB_CTRL0_JFE;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0);
- }
- }
- }
-
- clear_bit(__IXGB_DOWN, &adapter->flags);
-
- napi_enable(&adapter->napi);
- ixgb_irq_enable(adapter);
-
- netif_wake_queue(netdev);
-
- mod_timer(&adapter->watchdog_timer, jiffies);
-
- return 0;
-}
-
-void
-ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
-{
- struct net_device *netdev = adapter->netdev;
-
- /* prevent the interrupt handler from restarting watchdog */
- set_bit(__IXGB_DOWN, &adapter->flags);
-
- netif_carrier_off(netdev);
-
- napi_disable(&adapter->napi);
- /* waiting for NAPI to complete can re-enable interrupts */
- ixgb_irq_disable(adapter);
- free_irq(adapter->pdev->irq, netdev);
-
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
-
- if (kill_watchdog)
- del_timer_sync(&adapter->watchdog_timer);
-
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- netif_stop_queue(netdev);
-
- ixgb_reset(adapter);
- ixgb_clean_tx_ring(adapter);
- ixgb_clean_rx_ring(adapter);
-}
-
-void
-ixgb_reset(struct ixgb_adapter *adapter)
-{
- struct ixgb_hw *hw = &adapter->hw;
-
- ixgb_adapter_stop(hw);
- if (!ixgb_init_hw(hw))
- netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
-
- /* restore frame size information */
- IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
- if (hw->max_frame_size >
- IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
- u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
- if (!(ctrl0 & IXGB_CTRL0_JFE)) {
- ctrl0 |= IXGB_CTRL0_JFE;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0);
- }
- }
-}
-
-static netdev_features_t
-ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
-{
- /*
- * Tx VLAN insertion does not work per HW design when Rx stripping is
- * disabled.
- */
- if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
- features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-
- return features;
-}
-
-static int
-ixgb_set_features(struct net_device *netdev, netdev_features_t features)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- netdev_features_t changed = features ^ netdev->features;
-
- if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
- return 0;
-
- adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
-
- if (netif_running(netdev)) {
- ixgb_down(adapter, true);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
-
- return 0;
-}
-
-
-static const struct net_device_ops ixgb_netdev_ops = {
- .ndo_open = ixgb_open,
- .ndo_stop = ixgb_close,
- .ndo_start_xmit = ixgb_xmit_frame,
- .ndo_set_rx_mode = ixgb_set_multi,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = ixgb_set_mac,
- .ndo_change_mtu = ixgb_change_mtu,
- .ndo_tx_timeout = ixgb_tx_timeout,
- .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
- .ndo_fix_features = ixgb_fix_features,
- .ndo_set_features = ixgb_set_features,
-};
-
-/**
- * ixgb_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in ixgb_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * ixgb_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-
-static int
-ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *netdev = NULL;
- struct ixgb_adapter *adapter;
- static int cards_found = 0;
- u8 addr[ETH_ALEN];
- int i;
- int err;
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
-
- err = pci_request_regions(pdev, ixgb_driver_name);
- if (err)
- goto err_request_regions;
-
- pci_set_master(pdev);
-
- netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
- if (!netdev) {
- err = -ENOMEM;
- goto err_alloc_etherdev;
- }
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- pci_set_drvdata(pdev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- adapter->hw.back = adapter;
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
-
- adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
- if (!adapter->hw.hw_addr) {
- err = -EIO;
- goto err_ioremap;
- }
-
- for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
- adapter->hw.io_base = pci_resource_start(pdev, i);
- break;
- }
- }
-
- netdev->netdev_ops = &ixgb_netdev_ops;
- ixgb_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, ixgb_clean);
-
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
-
- adapter->bd_number = cards_found;
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
-
- /* setup the private structure */
-
- err = ixgb_sw_init(adapter);
- if (err)
- goto err_sw_init;
-
- netdev->hw_features = NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- netdev->hw_features |= NETIF_F_RXCSUM;
-
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
-
- /* MTU range: 68 - 16114 */
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
-
- /* make sure the EEPROM is good */
-
- if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- netif_err(adapter, probe, adapter->netdev,
- "The EEPROM Checksum Is Not Valid\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- ixgb_get_ee_mac_addr(&adapter->hw, addr);
- eth_hw_addr_set(netdev, addr);
-
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
-
- timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);
-
- INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err)
- goto err_register;
-
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
-
- netif_info(adapter, probe, adapter->netdev,
- "Intel(R) PRO/10GbE Network Connection\n");
- ixgb_check_options(adapter);
- /* reset the hardware with the new settings */
-
- ixgb_reset(adapter);
-
- cards_found++;
- return 0;
-
-err_register:
-err_sw_init:
-err_eeprom:
- iounmap(adapter->hw.hw_addr);
-err_ioremap:
- free_netdev(netdev);
-err_alloc_etherdev:
- pci_release_regions(pdev);
-err_request_regions:
-err_dma_mask:
- pci_disable_device(pdev);
- return err;
-}
-
-/**
- * ixgb_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * ixgb_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. This could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-
-static void
-ixgb_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- cancel_work_sync(&adapter->tx_timeout_task);
-
- unregister_netdev(netdev);
-
- iounmap(adapter->hw.hw_addr);
- pci_release_regions(pdev);
-
- free_netdev(netdev);
- pci_disable_device(pdev);
-}
-
-/**
- * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
- * @adapter: board private structure to initialize
- *
- * ixgb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-
-static int
-ixgb_sw_init(struct ixgb_adapter *adapter)
-{
- struct ixgb_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_id = pdev->subsystem_device;
-
- hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
- adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
-
- if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
- (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
- (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
- (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
- hw->mac_type = ixgb_82597;
- else {
- /* should never have loaded on this device */
- netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
- }
-
- /* enable flow control to be programmed */
- hw->fc.send_xon = 1;
-
- set_bit(__IXGB_DOWN, &adapter->flags);
- return 0;
-}
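
For the stock MTU the sizing above works out as follows (a worked example, assuming the default MTU of 1500):

    max_frame_size = 1500 + ENET_HEADER_SIZE + ENET_FCS_LENGTH; /* 1500+14+4 = 1518 */
    rx_buffer_len  = max_frame_size + 8;                        /* 1526, errata pad  */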
-
-/**
- * ixgb_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-
-static int
-ixgb_open(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- int err;
-
- /* allocate transmit descriptors */
- err = ixgb_setup_tx_resources(adapter);
- if (err)
- goto err_setup_tx;
-
- netif_carrier_off(netdev);
-
- /* allocate receive descriptors */
-
- err = ixgb_setup_rx_resources(adapter);
- if (err)
- goto err_setup_rx;
-
- err = ixgb_up(adapter);
- if (err)
- goto err_up;
-
- netif_start_queue(netdev);
-
- return 0;
-
-err_up:
- ixgb_free_rx_resources(adapter);
-err_setup_rx:
- ixgb_free_tx_resources(adapter);
-err_setup_tx:
- ixgb_reset(adapter);
-
- return err;
-}
-
-/**
- * ixgb_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the drivers control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- **/
-
-static int
-ixgb_close(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- ixgb_down(adapter, true);
-
- ixgb_free_tx_resources(adapter);
- ixgb_free_rx_resources(adapter);
-
- return 0;
-}
-
-/**
- * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-
-int
-ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int size;
-
- size = sizeof(struct ixgb_buffer) * txdr->count;
- txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info)
- return -ENOMEM;
-
- /* round up to nearest 4K */
-
- txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
- txdr->size = ALIGN(txdr->size, 4096);
-
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
- if (!txdr->desc) {
- vfree(txdr->buffer_info);
- return -ENOMEM;
- }
-
- txdr->next_to_use = 0;
- txdr->next_to_clean = 0;
-
- return 0;
-}
-
-/**
- * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
- **/
-
-static void
-ixgb_configure_tx(struct ixgb_adapter *adapter)
-{
- u64 tdba = adapter->tx_ring.dma;
- u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
- u32 tctl;
- struct ixgb_hw *hw = &adapter->hw;
-
- /* Setup the Base and Length of the Tx Descriptor Ring
- * tx_ring.dma can be either a 32 or 64 bit value
- */
-
- IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
- IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
-
- IXGB_WRITE_REG(hw, TDLEN, tdlen);
-
- /* Setup the HW Tx Head and Tail descriptor pointers */
-
- IXGB_WRITE_REG(hw, TDH, 0);
- IXGB_WRITE_REG(hw, TDT, 0);
-
- /* don't set up txdctl, it induces performance problems if configured
- * incorrectly */
- /* Set the Tx Interrupt Delay register */
-
- IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
-
- /* Program the Transmit Control Register */
-
- tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
- IXGB_WRITE_REG(hw, TCTL, tctl);
-
- /* Setup Transmit Descriptor Settings for this adapter */
- adapter->tx_cmd_type =
- IXGB_TX_DESC_TYPE |
- (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
-}
-
-/**
- * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
- *
- * Returns 0 on success, negative on failure
- **/
-
-int
-ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int size;
-
- size = sizeof(struct ixgb_buffer) * rxdr->count;
- rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info)
- return -ENOMEM;
-
- /* Round up to nearest 4K */
-
- rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
- rxdr->size = ALIGN(rxdr->size, 4096);
-
- rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
-
- if (!rxdr->desc) {
- vfree(rxdr->buffer_info);
- return -ENOMEM;
- }
-
- rxdr->next_to_clean = 0;
- rxdr->next_to_use = 0;
-
- return 0;
-}
-
-/**
- * ixgb_setup_rctl - configure the receive control register
- * @adapter: Board private structure
- **/
-
-static void
-ixgb_setup_rctl(struct ixgb_adapter *adapter)
-{
- u32 rctl;
-
- rctl = IXGB_READ_REG(&adapter->hw, RCTL);
-
- rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
-
- rctl |=
- IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
- IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
- (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
-
- rctl |= IXGB_RCTL_SECRC;
-
- if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
- rctl |= IXGB_RCTL_BSIZE_2048;
- else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
- rctl |= IXGB_RCTL_BSIZE_4096;
- else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
- rctl |= IXGB_RCTL_BSIZE_8192;
- else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
- rctl |= IXGB_RCTL_BSIZE_16384;
-
- IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
-}
-
-/**
- * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
- * @adapter: board private structure
- *
- * Configure the Rx unit of the MAC after a reset.
- **/
-
-static void
-ixgb_configure_rx(struct ixgb_adapter *adapter)
-{
- u64 rdba = adapter->rx_ring.dma;
- u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
- struct ixgb_hw *hw = &adapter->hw;
- u32 rctl;
- u32 rxcsum;
-
- /* make sure receives are disabled while setting up the descriptors */
-
- rctl = IXGB_READ_REG(hw, RCTL);
- IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
-
- /* set the Receive Delay Timer Register */
-
- IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
-
- /* Setup the Base and Length of the Rx Descriptor Ring */
-
- IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
- IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
-
- IXGB_WRITE_REG(hw, RDLEN, rdlen);
-
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGB_WRITE_REG(hw, RDH, 0);
- IXGB_WRITE_REG(hw, RDT, 0);
-
-	/* due to the hardware errata with RXDCTL, we are unable to use any of
-	 * the performance enhancing features of it without causing other
-	 * subtle bugs, some of which could include receive length
-	 * corruption at high data rates (WTHRESH > 0) and/or receive
-	 * descriptor ring irregularities (particularly in hardware cache) */
- IXGB_WRITE_REG(hw, RXDCTL, 0);
-
- /* Enable Receive Checksum Offload for TCP and UDP */
- if (adapter->rx_csum) {
- rxcsum = IXGB_READ_REG(hw, RXCSUM);
- rxcsum |= IXGB_RXCSUM_TUOFL;
- IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
- }
-
- /* Enable Receives */
-
- IXGB_WRITE_REG(hw, RCTL, rctl);
-}
-
-/**
- * ixgb_free_tx_resources - Free Tx Resources
- * @adapter: board private structure
- *
- * Free all transmit software resources
- **/
-
-void
-ixgb_free_tx_resources(struct ixgb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgb_clean_tx_ring(adapter);
-
- vfree(adapter->tx_ring.buffer_info);
- adapter->tx_ring.buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
-
- adapter->tx_ring.desc = NULL;
-}
-
-static void
-ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
- struct ixgb_buffer *buffer_info)
-{
- if (buffer_info->dma) {
- if (buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_TO_DEVICE);
- else
- dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_TO_DEVICE);
- buffer_info->dma = 0;
- }
-
- if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- buffer_info->time_stamp = 0;
-	/* length and next_to_watch are always (re)initialized on the tx
-	 * path, so there is no need to clear them here:
-	 * buffer_info->length = 0;
-	 * buffer_info->next_to_watch = 0; */
-}
-
-/**
- * ixgb_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
- **/
-
-static void
-ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct ixgb_buffer *buffer_info;
- unsigned long size;
- unsigned int i;
-
- /* Free all the Tx ring sk_buffs */
-
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->buffer_info[i];
- ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
- }
-
- size = sizeof(struct ixgb_buffer) * tx_ring->count;
- memset(tx_ring->buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
-
- memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- IXGB_WRITE_REG(&adapter->hw, TDH, 0);
- IXGB_WRITE_REG(&adapter->hw, TDT, 0);
-}
-
-/**
- * ixgb_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
- *
- * Free all receive software resources
- **/
-
-void
-ixgb_free_rx_resources(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct pci_dev *pdev = adapter->pdev;
-
- ixgb_clean_rx_ring(adapter);
-
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
-
- rx_ring->desc = NULL;
-}
-
-/**
- * ixgb_clean_rx_ring - Free Rx Buffers
- * @adapter: board private structure
- **/
-
-static void
-ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct ixgb_buffer *buffer_info;
- struct pci_dev *pdev = adapter->pdev;
- unsigned long size;
- unsigned int i;
-
- /* Free all the Rx ring sk_buffs */
-
- for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->dma) {
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- buffer_info->length = 0;
- }
-
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- }
-
- size = sizeof(struct ixgb_buffer) * rx_ring->count;
- memset(rx_ring->buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
-
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- IXGB_WRITE_REG(&adapter->hw, RDH, 0);
- IXGB_WRITE_REG(&adapter->hw, RDT, 0);
-}
-
-/**
- * ixgb_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-
-static int
-ixgb_set_mac(struct net_device *netdev, void *p)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
-
- return 0;
-}
-
-/**
- * ixgb_set_multi - Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
- **/
-
-static void
-ixgb_set_multi(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u32 rctl;
-
- /* Check for Promiscuous and All Multicast modes */
-
- rctl = IXGB_READ_REG(hw, RCTL);
-
- if (netdev->flags & IFF_PROMISC) {
- rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
- /* disable VLAN filtering */
- rctl &= ~IXGB_RCTL_CFIEN;
- rctl &= ~IXGB_RCTL_VFE;
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- rctl |= IXGB_RCTL_MPE;
- rctl &= ~IXGB_RCTL_UPE;
- } else {
- rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
- }
- /* enable VLAN filtering */
- rctl |= IXGB_RCTL_VFE;
- rctl &= ~IXGB_RCTL_CFIEN;
- }
-
- if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
- rctl |= IXGB_RCTL_MPE;
- IXGB_WRITE_REG(hw, RCTL, rctl);
- } else {
- u8 *mta = kmalloc_array(ETH_ALEN,
- IXGB_MAX_NUM_MULTICAST_ADDRESSES,
- GFP_ATOMIC);
- u8 *addr;
- if (!mta)
- goto alloc_failed;
-
- IXGB_WRITE_REG(hw, RCTL, rctl);
-
- addr = mta;
- netdev_for_each_mc_addr(ha, netdev) {
- memcpy(addr, ha->addr, ETH_ALEN);
- addr += ETH_ALEN;
- }
-
- ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
- kfree(mta);
- }
-
-alloc_failed:
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- ixgb_vlan_strip_enable(adapter);
- else
- ixgb_vlan_strip_disable(adapter);
-
-}
-
-/**
- * ixgb_watchdog - Timer Call-back
- * @t: pointer to timer_list containing our private info pointer
- **/
-
-static void
-ixgb_watchdog(struct timer_list *t)
-{
- struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
- struct net_device *netdev = adapter->netdev;
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
-
- ixgb_check_for_link(&adapter->hw);
-
- if (ixgb_check_for_bad_link(&adapter->hw)) {
- /* force the reset path */
- netif_stop_queue(netdev);
- }
-
- if (adapter->hw.link_up) {
- if (!netif_carrier_ok(netdev)) {
- netdev_info(netdev,
- "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
- (adapter->hw.fc.type == ixgb_fc_full) ?
- "RX/TX" :
- (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
- "RX" :
- (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
- "TX" : "None");
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- }
- } else {
- if (netif_carrier_ok(netdev)) {
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- netdev_info(netdev, "NIC Link is Down\n");
- netif_carrier_off(netdev);
- }
- }
-
- ixgb_update_stats(adapter);
-
- if (!netif_carrier_ok(netdev)) {
- if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
- schedule_work(&adapter->tx_timeout_task);
- /* return immediately since reset is imminent */
- return;
- }
- }
-
- /* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = true;
-
- /* generate an interrupt to force clean up of any stragglers */
- IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
-
- /* Reset the timer */
- mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
-}
-
-#define IXGB_TX_FLAGS_CSUM 0x00000001
-#define IXGB_TX_FLAGS_VLAN 0x00000002
-#define IXGB_TX_FLAGS_TSO 0x00000004
-
-static int
-ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
-{
- struct ixgb_context_desc *context_desc;
- unsigned int i;
- u8 ipcss, ipcso, tucss, tucso, hdr_len;
- u16 ipcse, tucse, mss;
-
- if (likely(skb_is_gso(skb))) {
- struct ixgb_buffer *buffer_info;
- struct iphdr *iph;
- int err;
-
- err = skb_cow_head(skb, 0);
- if (err < 0)
- return err;
-
- hdr_len = skb_tcp_all_headers(skb);
- mss = skb_shinfo(skb)->gso_size;
- iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP, 0);
- ipcss = skb_network_offset(skb);
- ipcso = (void *)&(iph->check) - (void *)skb->data;
- ipcse = skb_transport_offset(skb) - 1;
- tucss = skb_transport_offset(skb);
- tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
- tucse = 0;
-
- i = adapter->tx_ring.next_to_use;
- context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
- buffer_info = &adapter->tx_ring.buffer_info[i];
- WARN_ON(buffer_info->dma != 0);
-
- context_desc->ipcss = ipcss;
- context_desc->ipcso = ipcso;
- context_desc->ipcse = cpu_to_le16(ipcse);
- context_desc->tucss = tucss;
- context_desc->tucso = tucso;
- context_desc->tucse = cpu_to_le16(tucse);
- context_desc->mss = cpu_to_le16(mss);
- context_desc->hdr_len = hdr_len;
- context_desc->status = 0;
- context_desc->cmd_type_len = cpu_to_le32(
- IXGB_CONTEXT_DESC_TYPE
- | IXGB_CONTEXT_DESC_CMD_TSE
- | IXGB_CONTEXT_DESC_CMD_IP
- | IXGB_CONTEXT_DESC_CMD_TCP
- | IXGB_CONTEXT_DESC_CMD_IDE
- | (skb->len - (hdr_len)));
-
-
- if (++i == adapter->tx_ring.count) i = 0;
- adapter->tx_ring.next_to_use = i;
-
- return 1;
- }
-
- return 0;
-}
-
-static bool
-ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
-{
- struct ixgb_context_desc *context_desc;
- unsigned int i;
- u8 css, cso;
-
- if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- struct ixgb_buffer *buffer_info;
- css = skb_checksum_start_offset(skb);
- cso = css + skb->csum_offset;
-
- i = adapter->tx_ring.next_to_use;
- context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
- buffer_info = &adapter->tx_ring.buffer_info[i];
- WARN_ON(buffer_info->dma != 0);
-
- context_desc->tucss = css;
- context_desc->tucso = cso;
- context_desc->tucse = 0;
- /* zero out any previously existing data in one instruction */
- *(u32 *)&(context_desc->ipcss) = 0;
- context_desc->status = 0;
- context_desc->hdr_len = 0;
- context_desc->mss = 0;
- context_desc->cmd_type_len =
- cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
- | IXGB_TX_DESC_CMD_IDE);
-
- if (++i == adapter->tx_ring.count) i = 0;
- adapter->tx_ring.next_to_use = i;
-
- return true;
- }
-
- return false;
-}
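
As a worked example of the css/cso pair above, for a plain IPv4/TCP frame with no VLAN tag and no IP options (values derived from the standard header layouts, shown for illustration only):

    u8 css = ETH_HLEN + sizeof(struct iphdr);       /* 14 + 20 = 34 */
    u8 cso = css + offsetof(struct tcphdr, check);  /* 34 + 16 = 50 */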
-
-#define IXGB_MAX_TXD_PWR 14
-#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
-
-static int
-ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
- unsigned int first)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct pci_dev *pdev = adapter->pdev;
- struct ixgb_buffer *buffer_info;
- int len = skb_headlen(skb);
- unsigned int offset = 0, size, count = 0, i;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int f;
-
- i = tx_ring->next_to_use;
-
- while (len) {
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_DATA_PER_TXD);
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && !nr_frags && size == len && size > 8))
- size -= 4;
-
- buffer_info->length = size;
- WARN_ON(buffer_info->dma != 0);
- buffer_info->time_stamp = jiffies;
- buffer_info->mapped_as_page = false;
- buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
- buffer_info->next_to_watch = 0;
-
- len -= size;
- offset += size;
- count++;
- if (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- }
-
- for (f = 0; f < nr_frags; f++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- len = skb_frag_size(frag);
- offset = 0;
-
- while (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_DATA_PER_TXD);
-
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && (f == (nr_frags - 1))
- && size == len && size > 8))
- size -= 4;
-
- buffer_info->length = size;
- buffer_info->time_stamp = jiffies;
- buffer_info->mapped_as_page = true;
- buffer_info->dma =
- skb_frag_dma_map(&pdev->dev, frag, offset, size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
- buffer_info->next_to_watch = 0;
-
- len -= size;
- offset += size;
- count++;
- }
- }
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
-
- return count;
-
-dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
- buffer_info->dma = 0;
- if (count)
- count--;
-
- while (count--) {
-		if (i == 0)
- i += tx_ring->count;
- i--;
- buffer_info = &tx_ring->buffer_info[i];
- ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
- }
-
- return 0;
-}
-
-static void
-ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct ixgb_tx_desc *tx_desc = NULL;
- struct ixgb_buffer *buffer_info;
- u32 cmd_type_len = adapter->tx_cmd_type;
- u8 status = 0;
- u8 popts = 0;
- unsigned int i;
-
- if (tx_flags & IXGB_TX_FLAGS_TSO) {
- cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
- popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
- }
-
- if (tx_flags & IXGB_TX_FLAGS_CSUM)
- popts |= IXGB_TX_DESC_POPTS_TXSM;
-
- if (tx_flags & IXGB_TX_FLAGS_VLAN)
- cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
-
- i = tx_ring->next_to_use;
-
- while (count--) {
- buffer_info = &tx_ring->buffer_info[i];
- tx_desc = IXGB_TX_DESC(*tx_ring, i);
- tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->cmd_type_len =
- cpu_to_le32(cmd_type_len | buffer_info->length);
- tx_desc->status = status;
- tx_desc->popts = popts;
- tx_desc->vlan = cpu_to_le16(vlan_id);
-
- if (++i == tx_ring->count) i = 0;
- }
-
- tx_desc->cmd_type_len |=
- cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
-
- tx_ring->next_to_use = i;
- IXGB_WRITE_REG(&adapter->hw, TDT, i);
-}
-
-static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
-
- netif_stop_queue(netdev);
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
- smp_mb();
-
- /* We need to check again in a case another CPU has just
- * made room available. */
- if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
- return -EBUSY;
-
- /* A reprieve! */
- netif_start_queue(netdev);
- ++adapter->restart_queue;
- return 0;
-}
-
-static int ixgb_maybe_stop_tx(struct net_device *netdev,
- struct ixgb_desc_ring *tx_ring, int size)
-{
- if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __ixgb_maybe_stop_tx(netdev, size);
-}
-
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
- (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
- + 1 /* one more needed for sentinel TSO workaround */
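
To make the worst-case arithmetic concrete: with IXGB_MAX_TXD_PWR == 14 a single descriptor covers at most 16 KB, so (worked examples, assuming 4 KB pages):

    TXD_USE_COUNT(16384) == (16384 >> 14) + 0 == 1   /* exact multiple */
    TXD_USE_COUNT(4096)  == (4096  >> 14) + 1 == 1   /* remainder case */
    TXD_USE_COUNT(60000) == (60000 >> 14) + 1 == 4

which puts DESC_NEEDED at 1 + MAX_SKB_FRAGS + 2 descriptors per worst-case send.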
-
-static netdev_tx_t
-ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- unsigned int first;
- unsigned int tx_flags = 0;
- int vlan_id = 0;
- int count = 0;
- int tso;
-
- if (test_bit(__IXGB_DOWN, &adapter->flags)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
- DESC_NEEDED)))
- return NETDEV_TX_BUSY;
-
- if (skb_vlan_tag_present(skb)) {
- tx_flags |= IXGB_TX_FLAGS_VLAN;
- vlan_id = skb_vlan_tag_get(skb);
- }
-
- first = adapter->tx_ring.next_to_use;
-
- tso = ixgb_tso(adapter, skb);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (likely(tso))
- tx_flags |= IXGB_TX_FLAGS_TSO;
- else if (ixgb_tx_csum(adapter, skb))
- tx_flags |= IXGB_TX_FLAGS_CSUM;
-
- count = ixgb_tx_map(adapter, skb, first);
-
- if (count) {
- ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
- /* Make sure there is space in the ring for the next send. */
- ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
-
- } else {
- dev_kfree_skb_any(skb);
- adapter->tx_ring.buffer_info[first].time_stamp = 0;
- adapter->tx_ring.next_to_use = first;
- }
-
- return NETDEV_TX_OK;
-}
-
-/**
- * ixgb_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- * @txqueue: queue hanging (unused)
- **/
-
-static void
-ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->tx_timeout_task);
-}
-
-static void
-ixgb_tx_timeout_task(struct work_struct *work)
-{
- struct ixgb_adapter *adapter =
- container_of(work, struct ixgb_adapter, tx_timeout_task);
-
- adapter->tx_timeout_count++;
- ixgb_down(adapter, true);
- ixgb_up(adapter);
-}
-
-/**
- * ixgb_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- **/
-
-static int
-ixgb_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
-
- if (netif_running(netdev))
- ixgb_down(adapter, true);
-
- adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
-
- netdev->mtu = new_mtu;
-
- if (netif_running(netdev))
- ixgb_up(adapter);
-
- return 0;
-}
-
-/**
- * ixgb_update_stats - Update the board statistics counters.
- * @adapter: board private structure
- **/
-
-void
-ixgb_update_stats(struct ixgb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* Prevent stats update while adapter is being reset */
- if (pci_channel_offline(pdev))
- return;
-
- if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
- u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
- u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
- u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
- u64 bcast = ((u64)bcast_h << 32) | bcast_l;
-
- multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
- /* fix up multicast stats by removing broadcasts */
- if (multi >= bcast)
- multi -= bcast;
-
- adapter->stats.mprcl += (multi & 0xFFFFFFFF);
- adapter->stats.mprch += (multi >> 32);
- adapter->stats.bprcl += bcast_l;
- adapter->stats.bprch += bcast_h;
- } else {
- adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
- adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
- adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
- adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
- }
- adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
- adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
- adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
- adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
- adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
- adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
- adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
- adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
- adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
- adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
- adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
- adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
- adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
- adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
- adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
- adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
- adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
- adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
- adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
- adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
- adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
- adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
- adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
- adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
- adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
- adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
- adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
- adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
- adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
- adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
- adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
- adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
- adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
- adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
- adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
- adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
- adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
- adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
- adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
- adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
- adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
- adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
- adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
- adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
- adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
- adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
- adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
- adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
- adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
- adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
- adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
- adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
- adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
- adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
- adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
- adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
-
- /* Fill out the OS statistics structure */
-
- netdev->stats.rx_packets = adapter->stats.gprcl;
- netdev->stats.tx_packets = adapter->stats.gptcl;
- netdev->stats.rx_bytes = adapter->stats.gorcl;
- netdev->stats.tx_bytes = adapter->stats.gotcl;
- netdev->stats.multicast = adapter->stats.mprcl;
- netdev->stats.collisions = 0;
-
- /* ignore RLEC as it reports errors for padded (<64 bytes) frames
- * with a length in the type/len field */
- netdev->stats.rx_errors =
- /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
- adapter->stats.ruc +
- adapter->stats.roc /*+ adapter->stats.rlec */ +
- adapter->stats.icbc +
- adapter->stats.ecbc + adapter->stats.mpc;
-
- /* see above
- * netdev->stats.rx_length_errors = adapter->stats.rlec;
- */
-
- netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
- netdev->stats.rx_fifo_errors = adapter->stats.mpc;
- netdev->stats.rx_missed_errors = adapter->stats.mpc;
- netdev->stats.rx_over_errors = adapter->stats.mpc;
-
- netdev->stats.tx_errors = 0;
- netdev->stats.rx_frame_errors = 0;
- netdev->stats.tx_aborted_errors = 0;
- netdev->stats.tx_carrier_errors = 0;
- netdev->stats.tx_fifo_errors = 0;
- netdev->stats.tx_heartbeat_errors = 0;
- netdev->stats.tx_window_errors = 0;
-}
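
[Annotation: each large counter on this MAC is exposed as a low/high pair of 32-bit registers, so the driver widens everything in software as above. A worked example of the broadcast assembly with made-up register values:

	u32 bcast_l = 0x00000010;	/* BPRCL, low 32 bits  */
	u32 bcast_h = 0x00000002;	/* BPRCH, high 32 bits */
	u64 bcast = ((u64)bcast_h << 32) | bcast_l;
	/* bcast == 0x0000000200000010 == 8589934608 broadcast frames */

The cast matters: without (u64), bcast_h << 32 would be an undefined full-width shift on a 32-bit value.]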
-
-/**
- * ixgb_intr - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-
-static irqreturn_t
-ixgb_intr(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- u32 icr = IXGB_READ_REG(hw, ICR);
-
- if (unlikely(!icr))
- return IRQ_NONE; /* Not our interrupt */
-
- if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
- if (!test_bit(__IXGB_DOWN, &adapter->flags))
- mod_timer(&adapter->watchdog_timer, jiffies);
-
- if (napi_schedule_prep(&adapter->napi)) {
- /* Disable interrupts and register for poll. The flush
- * of the posted write is intentionally left out.
- */
- IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
- __napi_schedule(&adapter->napi);
- }
- return IRQ_HANDLED;
-}
-
-/**
- * ixgb_clean - NAPI Rx polling callback
- * @napi: napi struct pointer
- * @budget: max number of receives to clean
- **/
-
-static int
-ixgb_clean(struct napi_struct *napi, int budget)
-{
- struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
- int work_done = 0;
-
- ixgb_clean_tx_irq(adapter);
- ixgb_clean_rx_irq(adapter, &work_done, budget);
-
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- if (!test_bit(__IXGB_DOWN, &adapter->flags))
- ixgb_irq_enable(adapter);
- }
-
- return work_done;
-}
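
[Annotation: the return value follows the standard NAPI contract, summarized as a comment sketch:

	/* NAPI poll convention:
	 *   work_done == budget -> more work may remain; the core keeps
	 *                          polling and interrupts stay masked
	 *   work_done <  budget -> ring is drained; napi_complete_done()
	 *                          and re-enable the device's interrupts
	 */
]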
-
-/**
- * ixgb_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
- **/
-
-static bool
-ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct net_device *netdev = adapter->netdev;
- struct ixgb_tx_desc *tx_desc, *eop_desc;
- struct ixgb_buffer *buffer_info;
- unsigned int i, eop;
- bool cleaned = false;
-
- i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IXGB_TX_DESC(*tx_ring, eop);
-
- while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
-
- rmb(); /* read buffer_info after eop_desc */
- for (cleaned = false; !cleaned; ) {
- tx_desc = IXGB_TX_DESC(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
-
- if (tx_desc->popts &
- (IXGB_TX_DESC_POPTS_TXSM |
- IXGB_TX_DESC_POPTS_IXSM))
- adapter->hw_csum_tx_good++;
-
- ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
-
- *(u32 *)&(tx_desc->status) = 0;
-
- cleaned = (i == eop);
- if (++i == tx_ring->count)
- i = 0;
- }
-
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IXGB_TX_DESC(*tx_ring, eop);
- }
-
- tx_ring->next_to_clean = i;
-
- if (unlikely(cleaned && netif_carrier_ok(netdev) &&
- IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean. */
- smp_mb();
-
- if (netif_queue_stopped(netdev) &&
- !(test_bit(__IXGB_DOWN, &adapter->flags))) {
- netif_wake_queue(netdev);
- ++adapter->restart_queue;
- }
- }
-
- if (adapter->detect_tx_hung) {
- /* detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
- adapter->detect_tx_hung = false;
- if (tx_ring->buffer_info[eop].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
- && !(IXGB_READ_REG(&adapter->hw, STATUS) &
- IXGB_STATUS_TXOFF)) {
- /* detected Tx unit hang */
- netif_err(adapter, drv, adapter->netdev,
- "Detected Tx Unit Hang\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- IXGB_READ_REG(&adapter->hw, TDH),
- IXGB_READ_REG(&adapter->hw, TDT),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->status);
- netif_stop_queue(netdev);
- }
- }
-
- return cleaned;
-}
-
-/**
- * ixgb_rx_checksum - Receive Checksum Offload for 82597.
- * @adapter: board private structure
- * @rx_desc: receive descriptor
- * @skb: socket buffer with received data
- **/
-
-static void
-ixgb_rx_checksum(struct ixgb_adapter *adapter,
- struct ixgb_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- /* Ignore Checksum bit is set OR
- * TCP Checksum has not been calculated
- */
- if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
- (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
- skb_checksum_none_assert(skb);
- return;
- }
-
- /* At this point we know the hardware did the TCP checksum */
- /* now look at the TCP checksum error bit */
- if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
- /* let the stack verify checksum errors */
- skb_checksum_none_assert(skb);
- adapter->hw_csum_rx_error++;
- } else {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
- }
-}
-
-/*
- * this should improve performance for small packets with large amounts
- * of reassembly being done in the stack
- */
-static void ixgb_check_copybreak(struct napi_struct *napi,
- struct ixgb_buffer *buffer_info,
- u32 length, struct sk_buff **skb)
-{
- struct sk_buff *new_skb;
-
- if (length > copybreak)
- return;
-
- new_skb = napi_alloc_skb(napi, length);
- if (!new_skb)
- return;
-
- skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
- (*skb)->data - NET_IP_ALIGN,
- length + NET_IP_ALIGN);
- /* save the skb in buffer_info as good */
- buffer_info->skb = *skb;
- *skb = new_skb;
-}
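
[Annotation: copybreak is conventionally a module parameter in these Intel drivers: frames at or below the threshold are copied into a right-sized skb so the full-length receive buffer and its DMA mapping can be recycled in place. A sketch of the usual declaration (the 256-byte default is an assumption, not taken from this file):

	static unsigned int copybreak __read_mostly = 256;	/* assumed default */
	module_param(copybreak, uint, 0644);
	MODULE_PARM_DESC(copybreak,
		"Maximum size of packet that is copied to a new buffer on receive");
]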
-
-/**
- * ixgb_clean_rx_irq - Send received data up the network stack
- * @adapter: board private structure
- * @work_done: output pointer to number of packets cleaned
- * @work_to_do: how much work we can complete
- **/
-
-static bool
-ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct ixgb_rx_desc *rx_desc, *next_rxd;
- struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
- u32 length;
- unsigned int i, j;
- int cleaned_count = 0;
- bool cleaned = false;
-
- i = rx_ring->next_to_clean;
- rx_desc = IXGB_RX_DESC(*rx_ring, i);
- buffer_info = &rx_ring->buffer_info[i];
-
- while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
- struct sk_buff *skb;
- u8 status;
-
- if (*work_done >= work_to_do)
- break;
-
- (*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
- status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
-
- prefetch(skb->data - NET_IP_ALIGN);
-
- if (++i == rx_ring->count)
- i = 0;
- next_rxd = IXGB_RX_DESC(*rx_ring, i);
- prefetch(next_rxd);
-
- j = i + 1;
- if (j == rx_ring->count)
- j = 0;
- next2_buffer = &rx_ring->buffer_info[j];
- prefetch(next2_buffer);
-
- next_buffer = &rx_ring->buffer_info[i];
-
- cleaned = true;
- cleaned_count++;
-
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
-
- length = le16_to_cpu(rx_desc->length);
- rx_desc->length = 0;
-
- if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
-
- /* All receives must fit into a single buffer */
-
- pr_debug("Receive packet consumed multiple buffers length<%x>\n",
- length);
-
- dev_kfree_skb_irq(skb);
- goto rxdesc_done;
- }
-
- if (unlikely(rx_desc->errors &
- (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
- IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
- dev_kfree_skb_irq(skb);
- goto rxdesc_done;
- }
-
- ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
-
- /* Good Receive */
- skb_put(skb, length);
-
- /* Receive Checksum Offload */
- ixgb_rx_checksum(adapter, rx_desc, skb);
-
- skb->protocol = eth_type_trans(skb, netdev);
- if (status & IXGB_RX_DESC_STATUS_VP)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rx_desc->special));
-
- netif_receive_skb(skb);
-
-rxdesc_done:
- /* clean up descriptor, might be written over by hw */
- rx_desc->status = 0;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
- ixgb_alloc_rx_buffers(adapter, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
- }
-
- rx_ring->next_to_clean = i;
-
- cleaned_count = IXGB_DESC_UNUSED(rx_ring);
- if (cleaned_count)
- ixgb_alloc_rx_buffers(adapter, cleaned_count);
-
- return cleaned;
-}
-
-/**
- * ixgb_alloc_rx_buffers - Replace used receive buffers
- * @adapter: address of board private structure
- * @cleaned_count: how many buffers to allocate
- **/
-
-static void
-ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct ixgb_rx_desc *rx_desc;
- struct ixgb_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- long cleancount;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
- cleancount = IXGB_DESC_UNUSED(rx_ring);
-
- /* leave three descriptors unused */
- while (--cleancount > 2 && cleaned_count--) {
- /* recycle! it's good for you */
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto map_skb;
- }
-
- skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
-map_skb:
- buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- rx_desc = IXGB_RX_DESC(*rx_ring, i);
- rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
- /* guarantee DD bit not set now before h/w gets descriptor
- * this is the rest of the workaround for h/w double
- * writeback. */
- rx_desc->status = 0;
-
- if (++i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
- if (likely(rx_ring->next_to_use != i)) {
- rx_ring->next_to_use = i;
- if (unlikely(i-- == 0))
- i = (rx_ring->count - 1);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs, such
- * as IA-64). */
- wmb();
- IXGB_WRITE_REG(&adapter->hw, RDT, i);
- }
-}
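
[Annotation: note the tail write at the end: RDT is backed up by one slot before being written, so the most recently filled descriptor is withheld from hardware until the next refill (the tail end of the double write-back workaround noted above). With example numbers:

	/* refill advanced the index to i == 10 */
	rx_ring->next_to_use = i;
	if (i-- == 0)			/* not taken here */
		i = rx_ring->count - 1;
	IXGB_WRITE_REG(&adapter->hw, RDT, i);	/* RDT = 9 */

	/* had the refill wrapped to i == 0, RDT would be count - 1 */
]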
-
-static void
-ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
-{
- u32 ctrl;
-
- /* enable VLAN tag insert/strip */
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl |= IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
-}
-
-static void
-ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
-{
- u32 ctrl;
-
- /* disable VLAN tag insert/strip */
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl &= ~IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
-}
-
-static int
-ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 vfta, index;
-
- /* add VID to filter table */
-
- index = (vid >> 5) & 0x7F;
- vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
- vfta |= (1 << (vid & 0x1F));
- ixgb_write_vfta(&adapter->hw, index, vfta);
- set_bit(vid, adapter->active_vlans);
-
- return 0;
-}
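
[Annotation: the 4096 possible VIDs map onto 128 32-bit VFTA registers: bits 11:5 of the VID select the register and bits 4:0 select the bit inside it. For VID 100, for example:

	u16 vid = 100;
	u32 index = (vid >> 5) & 0x7F;	/* 100 / 32 = 3 -> VFTA[3] */
	u32 bit = vid & 0x1F;		/* 100 % 32 = 4 -> bit 4   */
	vfta |= (1 << bit);		/* ORs in 0x00000010       */
]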
-
-static int
-ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 vfta, index;
-
- /* remove VID from filter table */
-
- index = (vid >> 5) & 0x7F;
- vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
- ixgb_write_vfta(&adapter->hw, index, vfta);
- clear_bit(vid, adapter->active_vlans);
-
- return 0;
-}
-
-static void
-ixgb_restore_vlan(struct ixgb_adapter *adapter)
-{
- u16 vid;
-
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
-}
-
-/**
- * ixgb_io_error_detected - called when PCI error is detected
- * @pdev: pointer to pci device with error
- * @state: pci channel state after error
- *
- * This callback is called by the PCI subsystem whenever
- * a PCI bus error is detected.
- */
-static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev))
- ixgb_down(adapter, true);
-
- pci_disable_device(pdev);
-
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * ixgb_io_slot_reset - called after the pci bus has been reset.
- * @pdev: pointer to pci device with error
- *
- * This callback is called after the PCI bus has been reset.
- * Basically, this tries to restart the card from scratch.
- * This is a shortened version of the device probe/discovery code;
- * it resembles the first half of the ixgb_probe() routine.
- */
-static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u8 addr[ETH_ALEN];
-
- if (pci_enable_device(pdev)) {
- netif_err(adapter, probe, adapter->netdev,
- "Cannot re-enable PCI device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- /* Perform card reset only on one instance of the card */
- if (PCI_FUNC(pdev->devfn) != 0)
- return PCI_ERS_RESULT_RECOVERED;
-
- pci_set_master(pdev);
-
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- ixgb_reset(adapter);
-
- /* Make sure the EEPROM is good */
- if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- netif_err(adapter, probe, adapter->netdev,
- "After reset, the EEPROM checksum is not valid\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- ixgb_get_ee_mac_addr(&adapter->hw, addr);
- eth_hw_addr_set(netdev, addr);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
- netif_err(adapter, probe, adapter->netdev,
- "After reset, invalid MAC address\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * ixgb_io_resume - called when it's OK to resume normal operations
- * @pdev: pointer to pci device with error
- *
- * The error recovery driver tells us that it's OK to resume
- * normal operation. Implementation resembles the second half
- * of the ixgb_probe() routine.
- */
-static void ixgb_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- pci_set_master(pdev);
-
- if (netif_running(netdev)) {
- if (ixgb_up(adapter)) {
- pr_err("can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
- mod_timer(&adapter->watchdog_timer, jiffies);
-}
-
-/* ixgb_main.c */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
deleted file mode 100644
index 7bd54efa698d..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-/* glue for the OS independent part of ixgb
- * includes register access macros
- */
-
-#ifndef _IXGB_OSDEP_H_
-#define _IXGB_OSDEP_H_
-
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/if_ether.h>
-
-#undef ASSERT
-#define ASSERT(x) BUG_ON(!(x))
-
-#define ENTER() pr_debug("%s\n", __func__)
-
-#define IXGB_WRITE_REG(a, reg, value) ( \
- writel((value), ((a)->hw_addr + IXGB_##reg)))
-
-#define IXGB_READ_REG(a, reg) ( \
- readl((a)->hw_addr + IXGB_##reg))
-
-#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
-
-#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
-
-#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
-
-#define IXGB_MEMCPY memcpy
-
-#endif /* _IXGB_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
deleted file mode 100644
index d40f96250691..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ /dev/null
@@ -1,442 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "ixgb.h"
-
-/* This is the only thing that needs to be changed to adjust the
- * maximum number of ports that the driver can manage.
- */
-
-#define IXGB_MAX_NIC 8
-
-#define OPTION_UNSET -1
-#define OPTION_DISABLED 0
-#define OPTION_ENABLED 1
-
-/* All parameters are treated the same, as an integer array of values.
- * This macro just reduces the need to repeat the same declaration code
- * over and over (plus this helps to avoid typo bugs).
- */
-
-#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
-#define IXGB_PARAM(X, desc) \
- static int X[IXGB_MAX_NIC+1] \
- = IXGB_PARAM_INIT; \
- static unsigned int num_##X = 0; \
- module_param_array_named(X, X, int, &num_##X, 0); \
- MODULE_PARM_DESC(X, desc);
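
[Annotation: for reference, IXGB_PARAM(TxDescriptors, "Number of transmit descriptors") expands to roughly:

	static int TxDescriptors[IXGB_MAX_NIC + 1] =
		{ [0 ... IXGB_MAX_NIC] = OPTION_UNSET };
	static unsigned int num_TxDescriptors = 0;
	module_param_array_named(TxDescriptors, TxDescriptors, int,
				 &num_TxDescriptors, 0);
	MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

so something like modprobe ixgb TxDescriptors=512,1024 supplies per-board values, and num_TxDescriptors records how many entries the user actually passed.]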
-
-/* Transmit Descriptor Count
- *
- * Valid Range: 64-4096
- *
- * Default Value: 256
- */
-
-IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
-
-/* Receive Descriptor Count
- *
- * Valid Range: 64-4096
- *
- * Default Value: 1024
- */
-
-IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
-
-/* User Specified Flow Control Override
- *
- * Valid Range: 0-3
- * - 0 - No Flow Control
- * - 1 - Rx only, respond to PAUSE frames but do not generate them
- * - 2 - Tx only, generate PAUSE frames but ignore them on receive
- * - 3 - Full Flow Control Support
- *
- * Default Value: 2 - Tx only (silicon bug avoidance)
- */
-
-IXGB_PARAM(FlowControl, "Flow Control setting");
-
-/* XsumRX - Receive Checksum Offload Enable/Disable
- *
- * Valid Range: 0, 1
- * - 0 - disables all checksum offload
- * - 1 - enables receive IP/TCP/UDP checksum offload
- * on 82597 based NICs
- *
- * Default Value: 1
- */
-
-IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
-
-/* Transmit Interrupt Delay in units of 0.8192 microseconds
- *
- * Valid Range: 0-65535
- *
- * Default Value: 32
- */
-
-IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay");
-
-/* Receive Interrupt Delay in units of 0.8192 microseconds
- *
- * Valid Range: 0-65535
- *
- * Default Value: 72
- */
-
-IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay");
-
-/* Receive Flow control high threshold (when we send a pause frame)
- * (FCRTH)
- *
- * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity)
- *
- * Default Value: 196,608 (0x30000)
- */
-
-IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
-
-/* Receive Flow control low threshold (when we send a resume frame)
- * (FCRTL)
- *
- * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity)
- * must be less than high threshold by at least 8 bytes
- *
- * Default Value: 163,840 (0x28000)
- */
-
-IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
-
-/* Flow control request timeout (how long to pause the link partner's tx)
- * (PAP 15:0)
- *
- * Valid Range: 1 - 65535
- *
- * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
- */
-
-IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
-
-/* Interrupt Delay Enable
- *
- * Valid Range: 0, 1
- *
- * - 0 - disables transmit interrupt delay
- * - 1 - enables transmit interrupt delay
- *
- * Default Value: 1
- */
-
-IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
-
-
-#define DEFAULT_TIDV 32
-#define MAX_TIDV 0xFFFF
-#define MIN_TIDV 0
-
-#define DEFAULT_RDTR 72
-#define MAX_RDTR 0xFFFF
-#define MIN_RDTR 0
-
-#define DEFAULT_FCRTL 0x28000
-#define DEFAULT_FCRTH 0x30000
-#define MIN_FCRTL 0
-#define MAX_FCRTL 0x3FFE8
-#define MIN_FCRTH 8
-#define MAX_FCRTH 0x3FFF0
-
-#define MIN_FCPAUSE 1
-#define MAX_FCPAUSE 0xffff
-#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
-
-struct ixgb_option {
- enum { enable_option, range_option, list_option } type;
- const char *name;
- const char *err;
- int def;
- union {
- struct { /* range_option info */
- int min;
- int max;
- } r;
- struct { /* list_option info */
- int nr;
- const struct ixgb_opt_list {
- int i;
- const char *str;
- } *p;
- } l;
- } arg;
-};
-
-static int
-ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
-{
- if (*value == OPTION_UNSET) {
- *value = opt->def;
- return 0;
- }
-
- switch (opt->type) {
- case enable_option:
- switch (*value) {
- case OPTION_ENABLED:
- pr_info("%s Enabled\n", opt->name);
- return 0;
- case OPTION_DISABLED:
- pr_info("%s Disabled\n", opt->name);
- return 0;
- }
- break;
- case range_option:
- if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- pr_info("%s set to %i\n", opt->name, *value);
- return 0;
- }
- break;
- case list_option: {
- int i;
- const struct ixgb_opt_list *ent;
-
- for (i = 0; i < opt->arg.l.nr; i++) {
- ent = &opt->arg.l.p[i];
- if (*value == ent->i) {
- if (ent->str[0] != '\0')
- pr_info("%s\n", ent->str);
- return 0;
- }
- }
- }
- break;
- default:
- BUG();
- }
-
- pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
- *value = opt->def;
- return -1;
-}
-
-/**
- * ixgb_check_options - Range Checking for Command Line Parameters
- * @adapter: board private structure
- *
- * This routine checks all command line parameters for valid user
- * input. If an invalid value is given, or if no user specified
- * value exists, a default value is used. The final value is stored
- * in a variable in the adapter structure.
- **/
-
-void
-ixgb_check_options(struct ixgb_adapter *adapter)
-{
- int bd = adapter->bd_number;
- if (bd >= IXGB_MAX_NIC) {
- pr_notice("Warning: no configuration for board #%i\n", bd);
- pr_notice("Using defaults for all values\n");
- }
-
- { /* Transmit Descriptor Count */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Transmit Descriptors",
- .err = "using default of " __MODULE_STRING(DEFAULT_TXD),
- .def = DEFAULT_TXD,
- .arg = { .r = { .min = MIN_TXD,
- .max = MAX_TXD}}
- };
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
-
- if (num_TxDescriptors > bd) {
- tx_ring->count = TxDescriptors[bd];
- ixgb_validate_option(&tx_ring->count, &opt);
- } else {
- tx_ring->count = opt.def;
- }
- tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
- }
- { /* Receive Descriptor Count */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Receive Descriptors",
- .err = "using default of " __MODULE_STRING(DEFAULT_RXD),
- .def = DEFAULT_RXD,
- .arg = { .r = { .min = MIN_RXD,
- .max = MAX_RXD}}
- };
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
-
- if (num_RxDescriptors > bd) {
- rx_ring->count = RxDescriptors[bd];
- ixgb_validate_option(&rx_ring->count, &opt);
- } else {
- rx_ring->count = opt.def;
- }
- rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
- }
- { /* Receive Checksum Offload Enable */
- static const struct ixgb_option opt = {
- .type = enable_option,
- .name = "Receive Checksum Offload",
- .err = "defaulting to Enabled",
- .def = OPTION_ENABLED
- };
-
- if (num_XsumRX > bd) {
- unsigned int rx_csum = XsumRX[bd];
- ixgb_validate_option(&rx_csum, &opt);
- adapter->rx_csum = rx_csum;
- } else {
- adapter->rx_csum = opt.def;
- }
- }
- { /* Flow Control */
-
- static const struct ixgb_opt_list fc_list[] = {
- { ixgb_fc_none, "Flow Control Disabled" },
- { ixgb_fc_rx_pause, "Flow Control Receive Only" },
- { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
- { ixgb_fc_full, "Flow Control Enabled" },
- { ixgb_fc_default, "Flow Control Hardware Default" }
- };
-
- static const struct ixgb_option opt = {
- .type = list_option,
- .name = "Flow Control",
- .err = "reading default settings from EEPROM",
- .def = ixgb_fc_tx_pause,
- .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
- .p = fc_list }}
- };
-
- if (num_FlowControl > bd) {
- unsigned int fc = FlowControl[bd];
- ixgb_validate_option(&fc, &opt);
- adapter->hw.fc.type = fc;
- } else {
- adapter->hw.fc.type = opt.def;
- }
- }
- { /* Receive Flow Control High Threshold */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Rx Flow Control High Threshold",
- .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
- .def = DEFAULT_FCRTH,
- .arg = { .r = { .min = MIN_FCRTH,
- .max = MAX_FCRTH}}
- };
-
- if (num_RxFCHighThresh > bd) {
- adapter->hw.fc.high_water = RxFCHighThresh[bd];
- ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
- } else {
- adapter->hw.fc.high_water = opt.def;
- }
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
- pr_info("Ignoring RxFCHighThresh when no RxFC\n");
- }
- { /* Receive Flow Control Low Threshold */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Rx Flow Control Low Threshold",
- .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
- .def = DEFAULT_FCRTL,
- .arg = { .r = { .min = MIN_FCRTL,
- .max = MAX_FCRTL}}
- };
-
- if (num_RxFCLowThresh > bd) {
- adapter->hw.fc.low_water = RxFCLowThresh[bd];
- ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
- } else {
- adapter->hw.fc.low_water = opt.def;
- }
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
- pr_info("Ignoring RxFCLowThresh when no RxFC\n");
- }
- { /* Flow Control Pause Time Request*/
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Flow Control Pause Time Request",
- .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
- .def = DEFAULT_FCPAUSE,
- .arg = { .r = { .min = MIN_FCPAUSE,
- .max = MAX_FCPAUSE}}
- };
-
- if (num_FCReqTimeout > bd) {
- unsigned int pause_time = FCReqTimeout[bd];
- ixgb_validate_option(&pause_time, &opt);
- adapter->hw.fc.pause_time = pause_time;
- } else {
- adapter->hw.fc.pause_time = opt.def;
- }
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause))
- pr_info("Ignoring FCReqTimeout when no RxFC\n");
- }
- /* high low and spacing check for rx flow control thresholds */
- if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
- /* high must be greater than low */
- if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
- /* set defaults */
- pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
- adapter->hw.fc.high_water = DEFAULT_FCRTH;
- adapter->hw.fc.low_water = DEFAULT_FCRTL;
- }
- }
- { /* Receive Interrupt Delay */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Receive Interrupt Delay",
- .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
- .def = DEFAULT_RDTR,
- .arg = { .r = { .min = MIN_RDTR,
- .max = MAX_RDTR}}
- };
-
- if (num_RxIntDelay > bd) {
- adapter->rx_int_delay = RxIntDelay[bd];
- ixgb_validate_option(&adapter->rx_int_delay, &opt);
- } else {
- adapter->rx_int_delay = opt.def;
- }
- }
- { /* Transmit Interrupt Delay */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Transmit Interrupt Delay",
- .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
- .def = DEFAULT_TIDV,
- .arg = { .r = { .min = MIN_TIDV,
- .max = MAX_TIDV}}
- };
-
- if (num_TxIntDelay > bd) {
- adapter->tx_int_delay = TxIntDelay[bd];
- ixgb_validate_option(&adapter->tx_int_delay, &opt);
- } else {
- adapter->tx_int_delay = opt.def;
- }
- }
-
- { /* Transmit Interrupt Delay Enable */
- static const struct ixgb_option opt = {
- .type = enable_option,
- .name = "Tx Interrupt Delay Enable",
- .err = "defaulting to Enabled",
- .def = OPTION_ENABLED
- };
-
- if (num_IntDelayEnable > bd) {
- unsigned int ide = IntDelayEnable[bd];
- ixgb_validate_option(&ide, &opt);
- adapter->tx_int_delay_enable = ide;
- } else {
- adapter->tx_int_delay_enable = opt.def;
- }
- }
-}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8736ca4b2628..63d4e32df029 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -9,7 +9,6 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
-#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 3ea00bc9b91c..adc953611913 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6089,18 +6089,19 @@ static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
return true;
}
-static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
- struct fwnode_handle *fwnode,
- char **mac_from)
+static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
+ struct fwnode_handle *fwnode,
+ char **mac_from)
{
struct mvpp2_port *port = netdev_priv(dev);
char hw_mac_addr[ETH_ALEN] = {0};
char fw_mac_addr[ETH_ALEN];
+ int ret;
if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
*mac_from = "firmware node";
eth_hw_addr_set(dev, fw_mac_addr);
- return;
+ return 0;
}
if (priv->hw_version == MVPP21) {
@@ -6108,19 +6109,24 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
if (is_valid_ether_addr(hw_mac_addr)) {
*mac_from = "hardware";
eth_hw_addr_set(dev, hw_mac_addr);
- return;
+ return 0;
}
}
/* Only valid on OF enabled platforms */
- if (!of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr)) {
+ ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (!ret) {
*mac_from = "nvmem cell";
eth_hw_addr_set(dev, fw_mac_addr);
- return;
+ return 0;
}
*mac_from = "random";
eth_hw_addr_random(dev);
+
+ return 0;
}
static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
@@ -6823,7 +6829,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
- mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+ err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+ if (err < 0)
+ goto err_free_stats;
port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 5a898fb88e37..fdce78ceea87 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
@@ -1050,7 +1049,6 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_regions;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
netdev = alloc_etherdev_mq(sizeof(struct octep_device),
@@ -1106,7 +1104,6 @@ register_dev_err:
err_octep_config:
free_netdev(netdev);
err_alloc_netdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_regions:
err_dma_mask:
@@ -1139,7 +1136,6 @@ static void octep_remove(struct pci_dev *pdev)
octep_device_cleanup(oct);
pci_release_mem_regions(pdev);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 87fff539d39d..d5691b6a2bc5 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1586,7 +1586,7 @@ static struct platform_driver pxa168_eth_driver = {
.suspend = pxa168_eth_suspend,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(pxa168_eth_of_match),
+ .of_match_table = pxa168_eth_of_match,
},
};
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 97374fb3ee79..da0db417ab69 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -19,6 +19,8 @@ config NET_MEDIATEK_SOC
select DIMLIB
select PAGE_POOL
select PAGE_POOL_STATS
+ select PCS_MTK_LYNXI
+ select REGMAP_MMIO
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 8e0c61c33ff8..03e008fbc859 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-y := mtk_eth_soc.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
ifdef CONFIG_DEBUG_FS
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 72648535a14d..317e447f4991 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -96,12 +96,20 @@ static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
{
- unsigned int val = 0;
+ unsigned int val = 0, mask = 0, reg = 0;
bool updated = true;
switch (path) {
case MTK_ETH_PATH_GMAC2_SGMII:
- val = CO_QPHY_SEL;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_U3_COPHY_V2)) {
+ reg = USB_PHY_SWITCH_REG;
+ val = SGMII_QPHY_SEL;
+ mask = QPHY_SEL_MASK;
+ } else {
+ reg = INFRA_MISC2;
+ val = CO_QPHY_SEL;
+ mask = val;
+ }
break;
default:
updated = false;
@@ -109,7 +117,7 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
}
if (updated)
- regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
+ regmap_update_bits(eth->infra, reg, mask, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3cb43623d3db..4403d3bbfd4d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
+#include <linux/pcs/pcs-mtk-lynxi.h>
#include <linux/jhash.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
@@ -374,17 +375,6 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
{
u32 val;
- /* Check DDR memory type.
- * Currently TRGMII mode with DDR2 memory is not supported.
- */
- regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
- if (interface == PHY_INTERFACE_MODE_TRGMII &&
- val & SYSCFG_DRAM_TYPE_DDR2) {
- dev_err(eth->dev,
- "TRGMII mode with DDR2 memory is not supported!\n");
- return -EOPNOTSUPP;
- }
-
val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
@@ -397,38 +387,42 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
phy_interface_t interface, int speed)
{
- u32 val;
+ unsigned long rate;
+ u32 tck, rck, intf;
int ret;
if (interface == PHY_INTERFACE_MODE_TRGMII) {
mtk_w32(eth, TRGMII_MODE, INTF_MODE);
- val = 500000000;
- ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
if (ret)
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
return;
}
- val = (speed == SPEED_1000) ?
- INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
- mtk_w32(eth, val, INTF_MODE);
+ if (speed == SPEED_1000) {
+ intf = INTF_MODE_RGMII_1000;
+ rate = 250000000;
+ rck = RCK_CTRL_RGMII_1000;
+ tck = TCK_CTRL_RGMII_1000;
+ } else {
+ intf = INTF_MODE_RGMII_10_100;
+ rate = 500000000;
+ rck = RCK_CTRL_RGMII_10_100;
+ tck = TCK_CTRL_RGMII_10_100;
+ }
+
+ mtk_w32(eth, intf, INTF_MODE);
regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
ETHSYS_TRGMII_CLK_SEL362_5,
ETHSYS_TRGMII_CLK_SEL362_5);
- val = (speed == SPEED_1000) ? 250000000 : 500000000;
- ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate);
if (ret)
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
- val = (speed == SPEED_1000) ?
- RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
- mtk_w32(eth, val, TRGMII_RCK_CTRL);
-
- val = (speed == SPEED_1000) ?
- TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
- mtk_w32(eth, val, TRGMII_TCK_CTRL);
+ mtk_w32(eth, rck, TRGMII_RCK_CTRL);
+ mtk_w32(eth, tck, TRGMII_TCK_CTRL);
}
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
@@ -444,7 +438,7 @@ static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
0 : mac->id;
- return mtk_sgmii_select_pcs(eth->sgmii, sid);
+ return eth->sgmii_pcs[sid];
}
return NULL;
@@ -465,19 +459,11 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
/* Setup soc pin functions */
switch (state->interface) {
case PHY_INTERFACE_MODE_TRGMII:
- if (mac->id)
- goto err_phy;
- if (!MTK_HAS_CAPS(mac->hw->soc->caps,
- MTK_GMAC1_TRGMII))
- goto err_phy;
- fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_RMII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
err = mtk_gmac_rgmii_path_setup(eth, mac->id);
if (err)
@@ -487,11 +473,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- err = mtk_gmac_sgmii_path_setup(eth, mac->id);
- if (err)
- goto init_err;
- }
+ err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
break;
case PHY_INTERFACE_MODE_GMII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
@@ -539,21 +523,13 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
}
- ge_mode = 0;
switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
ge_mode = 1;
break;
- case PHY_INTERFACE_MODE_REVMII:
- ge_mode = 2;
- break;
- case PHY_INTERFACE_MODE_RMII:
- if (mac->id)
- goto err_phy;
- ge_mode = 3;
- break;
default:
+ ge_mode = 0;
break;
}
@@ -790,8 +766,10 @@ static const struct phylink_mac_ops mtk_phylink_ops = {
static int mtk_mdio_init(struct mtk_eth *eth)
{
+ unsigned int max_clk = 2500000, divider;
struct device_node *mii_np;
int ret;
+ u32 val;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
@@ -819,6 +797,25 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
+
+ if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
+ if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
+ dev_err(eth->dev, "MDIO clock frequency out of range");
+ ret = -EINVAL;
+ goto err_put_node;
+ }
+ max_clk = val;
+ }
+ divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+
+ /* Configure MDC Divider */
+ val = mtk_r32(eth, MTK_PPSC);
+ val &= ~PPSC_MDC_CFG;
+ val |= FIELD_PREP(PPSC_MDC_CFG, divider) | PPSC_MDC_TURBO;
+ mtk_w32(eth, val, MTK_PPSC);
+
+ dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
+
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
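
[Annotation: a worked example of the divider selection above, using the constants added to the header later in this patch (MDC_MAX_FREQ = 25 MHz):

	/* default target of 2.5 MHz */
	divider = min_t(unsigned int, DIV_ROUND_UP(25000000, 2500000), 63);
	/* divider == 10 -> MDC = 25 MHz / 10 = 2.5 MHz */

	/* device tree requests clock-frequency = 12 MHz */
	divider = min_t(unsigned int, DIV_ROUND_UP(25000000, 12000000), 63);
	/* divider == 3 -> MDC = 25 MHz / 3 ~= 8.33 MHz, never above target */
]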
@@ -4057,8 +4054,17 @@ static int mtk_unreg_dev(struct mtk_eth *eth)
return 0;
}
+static void mtk_sgmii_destroy(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
+}
+
static int mtk_cleanup(struct mtk_eth *eth)
{
+ mtk_sgmii_destroy(eth);
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
@@ -4330,6 +4336,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
struct mtk_mac *mac;
int id, err;
int txqs = 1;
+ u32 val;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -4406,6 +4413,15 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
__set_bit(PHY_INTERFACE_MODE_TRGMII,
mac->phylink_config.supported_interfaces);
+ /* TRGMII is not permitted on MT7621 if using DDR2 */
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+ if (val & SYSCFG_DRAM_TYPE_DDR2)
+ __clear_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+ }
+
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
__set_bit(PHY_INTERFACE_MODE_SGMII,
mac->phylink_config.supported_interfaces);
@@ -4494,6 +4510,36 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
rtnl_unlock();
}
+static int mtk_sgmii_init(struct mtk_eth *eth)
+{
+ struct device_node *np;
+ struct regmap *regmap;
+ u32 flags;
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
+ if (!np)
+ break;
+
+ regmap = syscon_node_to_regmap(np);
+ flags = 0;
+ if (of_property_read_bool(np, "mediatek,pnswap"))
+ flags |= MTK_SGMII_FLAG_PN_SWAP;
+
+ of_node_put(np);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
+ eth->soc->ana_rgc3,
+ flags);
+ }
+
+ return 0;
+}
+
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = NULL;
@@ -4557,13 +4603,7 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
- GFP_KERNEL);
- if (!eth->sgmii)
- return -ENOMEM;
-
- err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
- eth->soc->ana_rgc3);
+ err = mtk_sgmii_init(eth);
if (err)
return err;
@@ -4574,14 +4614,17 @@ static int mtk_probe(struct platform_device *pdev)
"mediatek,pctl");
if (IS_ERR(eth->pctl)) {
dev_err(&pdev->dev, "no pctl regmap found\n");
- return PTR_ERR(eth->pctl);
+ err = PTR_ERR(eth->pctl);
+ goto err_destroy_sgmii;
}
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
+ if (!res) {
+ err = -EINVAL;
+ goto err_destroy_sgmii;
+ }
}
if (eth->soc->offload_version) {
@@ -4691,8 +4734,8 @@ static int mtk_probe(struct platform_device *pdev)
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
- eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
- eth->soc->offload_version, i);
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
+
if (!eth->ppe[i]) {
err = -ENOMEM;
goto err_deinit_ppe;
@@ -4740,6 +4783,8 @@ err_deinit_hw:
mtk_hw_deinit(eth);
err_wed_exit:
mtk_wed_exit();
+err_destroy_sgmii:
+ mtk_sgmii_destroy(eth);
return err;
}
@@ -4814,6 +4859,7 @@ static const struct mtk_soc_data mt7622_data = {
.required_pctl = false,
.offload_version = 2,
.hash_offset = 2,
+ .has_accounting = true,
.foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
@@ -4851,6 +4897,7 @@ static const struct mtk_soc_data mt7629_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
+ .has_accounting = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4861,6 +4908,27 @@ static const struct mtk_soc_data mt7629_data = {
},
};
+static const struct mtk_soc_data mt7981_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7981_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7981_CLKS_BITMAP,
+ .required_pctl = false,
+ .offload_version = 2,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .has_accounting = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
static const struct mtk_soc_data mt7986_data = {
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
@@ -4871,6 +4939,7 @@ static const struct mtk_soc_data mt7986_data = {
.offload_version = 2,
.hash_offset = 4,
.foe_entry_size = sizeof(struct mtk_foe_entry),
+ .has_accounting = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
@@ -4903,6 +4972,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 084a6badef6d..23c7abeb5c14 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -363,6 +363,13 @@
#define RX_DMA_VTAG_V2 BIT(0)
#define RX_DMA_L4_VALID_V2 BIT(2)
+/* PHY Polling and SMI Master Control registers */
+#define MTK_PPSC 0x10000
+#define PPSC_MDC_CFG GENMASK(29, 24)
+#define PPSC_MDC_TURBO BIT(20)
+#define MDC_MAX_FREQ 25000000
+#define MDC_MAX_DIVIDER 63
+
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
@@ -503,64 +510,16 @@
#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
-/* SGMII subsystem config registers */
-/* BMCR (low 16) BMSR (high 16) */
-#define SGMSYS_PCS_CONTROL_1 0x0
-#define SGMII_BMCR GENMASK(15, 0)
-#define SGMII_BMSR GENMASK(31, 16)
-#define SGMII_AN_RESTART BIT(9)
-#define SGMII_ISOLATE BIT(10)
-#define SGMII_AN_ENABLE BIT(12)
-#define SGMII_LINK_STATYS BIT(18)
-#define SGMII_AN_ABILITY BIT(19)
-#define SGMII_AN_COMPLETE BIT(21)
-#define SGMII_PCS_FAULT BIT(23)
-#define SGMII_AN_EXPANSION_CLR BIT(30)
-
-#define SGMSYS_PCS_ADVERTISE 0x8
-#define SGMII_ADVERTISE GENMASK(15, 0)
-#define SGMII_LPA GENMASK(31, 16)
-
-/* Register to programmable link timer, the unit in 2 * 8ns */
-#define SGMSYS_PCS_LINK_TIMER 0x18
-#define SGMII_LINK_TIMER_MASK GENMASK(19, 0)
-#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & SGMII_LINK_TIMER_MASK)
-
-/* Register to control remote fault */
-#define SGMSYS_SGMII_MODE 0x20
-#define SGMII_IF_MODE_SGMII BIT(0)
-#define SGMII_SPEED_DUPLEX_AN BIT(1)
-#define SGMII_SPEED_MASK GENMASK(3, 2)
-#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
-#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
-#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
-#define SGMII_DUPLEX_HALF BIT(4)
-#define SGMII_IF_MODE_BIT5 BIT(5)
-#define SGMII_REMOTE_FAULT_DIS BIT(8)
-#define SGMII_CODE_SYNC_SET_VAL BIT(9)
-#define SGMII_CODE_SYNC_SET_EN BIT(10)
-#define SGMII_SEND_AN_ERROR_EN BIT(11)
-#define SGMII_IF_MODE_MASK GENMASK(5, 1)
-
-/* Register to reset SGMII design */
-#define SGMII_RESERVED_0 0x34
-#define SGMII_SW_RESET BIT(0)
-
-/* Register to set SGMII speed, ANA RG_ Control Signals III*/
-#define SGMSYS_ANA_RG_CS3 0x2028
-#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
-#define RG_PHY_SPEED_1_25G 0x0
-#define RG_PHY_SPEED_3_125G BIT(2)
-
-/* Register to power up QPHY */
-#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
-#define SGMII_PHYA_PWD BIT(4)
-
/* Infrasys subsystem config registers */
#define INFRA_MISC2 0x70c
#define CO_QPHY_SEL BIT(0)
#define GEPHY_MAC_SEL BIT(1)
+/* Top misc registers */
+#define USB_PHY_SWITCH_REG 0x218
+#define QPHY_SEL_MASK GENMASK(1, 0)
+#define SGMII_QPHY_SEL 0x2
+
/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET 0x0800
#define MT7628_SDM_OFFSET 0x0c00
@@ -741,6 +700,17 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+#define MT7981_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK))
#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
BIT(MTK_CLK_SGMII_TX_250M) | \
@@ -854,6 +824,7 @@ enum mkt_eth_capabilities {
MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
MTK_RSTCTRL_PPE1_BIT,
+ MTK_U3_COPHY_V2_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -888,6 +859,7 @@ enum mkt_eth_capabilities {
#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
+#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -960,6 +932,11 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
+ MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
@@ -1034,6 +1011,8 @@ struct mtk_reg_map {
* the extra setup for those pins used by GMAC.
* @hash_offset Flow table hash offset.
* @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
* @txd_size Tx DMA descriptor size.
* @rxd_size Rx DMA descriptor size.
* @rx_irq_done_mask Rx irq done register mask.
@@ -1051,6 +1030,7 @@ struct mtk_soc_data {
u8 hash_offset;
u16 foe_entry_size;
netdev_features_t hw_features;
+ bool has_accounting;
struct {
u32 txd_size;
u32 rxd_size;
@@ -1066,29 +1046,6 @@ struct mtk_soc_data {
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
-/* struct mtk_pcs - This structure holds each sgmii regmap and associated
- * data
- * @regmap: The register map pointing at the range used to setup
- * SGMII modes
- * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
- * @interface: Currently configured interface mode
- * @pcs: Phylink PCS structure
- */
-struct mtk_pcs {
- struct regmap *regmap;
- u32 ana_rgc3;
- phy_interface_t interface;
- struct phylink_pcs pcs;
-};
-
-/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
- * characteristics
- * @pcs Array of individual PCS structures
- */
-struct mtk_sgmii {
- struct mtk_pcs pcs[MTK_MAX_DEVS];
-};
-
/* struct mtk_eth - This is the main datasructure for holding the state
* of the driver
* @dev: The device pointer
@@ -1108,6 +1065,7 @@ struct mtk_sgmii {
* MII modes
* @infra: The register map pointing at the range used to setup
* SGMII and GePHY path
+ * @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
* @pctl: The register map pointing at the range used to setup
* GMAC port drive/slew values
* @dma_refcnt: track how many netdevs are using the DMA engine
@@ -1148,8 +1106,8 @@ struct mtk_eth {
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
- struct regmap *infra;
- struct mtk_sgmii *sgmii;
+ struct regmap *infra;
+ struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
@@ -1311,10 +1269,6 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
-struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id);
-int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
- u32 ana_rgc3);
-
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 6883eb34cd8b..c099e8736716 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -74,6 +74,48 @@ static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
return ret;
}
+static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
+ !(val & MTK_PPE_MIB_SER_CR_ST),
+ 20, MTK_PPE_WAIT_TIMEOUT_US);
+
+ if (ret)
+ dev_err(ppe->dev, "MIB table busy");
+
+ return ret;
+}
+
+static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
+{
+ u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
+ u32 val, cnt_r0, cnt_r1, cnt_r2;
+ int ret;
+
+ val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
+ ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
+
+ ret = mtk_ppe_mib_wait_busy(ppe);
+ if (ret)
+ return ret;
+
+ cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
+ cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
+ cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
+
+ byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
+ byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
+ pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
+ pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
+ *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
+ *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
+
+ return 0;
+}
+
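Note: the serial read hands back a 48-bit byte counter and a 40-bit packet counter split across three 32-bit registers (bytes = R1[15:0]:R0[31:0], packets = R2[23:0]:R1[31:16], per the GENMASKs in mtk_ppe_regs.h further down); the (u64) cast on the packet high word matters because a 24-bit value shifted left by 16 no longer fits in a u32. A minimal sketch of the same reassembly, assuming only <linux/types.h>:

	/* Sketch: reassemble the split MIB counters; field widths follow
	 * the MTK_PPE_MIB_SER_R* definitions in mtk_ppe_regs.h.
	 */
	static inline void mib_assemble(u32 r0, u32 r1, u32 r2,
					u64 *bytes, u64 *packets)
	{
		*bytes = ((u64)(r1 & 0xffff) << 32) | r0;		/* 48 bits */
		*packets = ((u64)(r2 & 0xffffff) << 16) | (r1 >> 16);	/* 40 bits */
	}
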
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
@@ -458,6 +500,13 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
hwe->ib1 &= ~MTK_FOE_IB1_STATE;
hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
+ if (ppe->accounting) {
+ struct mtk_foe_accounting *acct;
+
+ acct = ppe->acct_table + entry->hash * sizeof(*acct);
+ acct->packets = 0;
+ acct->bytes = 0;
+ }
}
entry->hash = 0xffff;
@@ -565,6 +614,9 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
wmb();
hwe->ib1 = entry->ib1;
+ if (ppe->accounting)
+ *mtk_foe_entry_ib2(eth, hwe) |= MTK_FOE_IB2_MIB_CNT;
+
dma_wmb();
mtk_ppe_cache_clear(ppe);
@@ -756,11 +808,39 @@ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
return mtk_ppe_wait_busy(ppe);
}
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
- int version, int index)
+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+ struct mtk_foe_accounting *diff)
+{
+ struct mtk_foe_accounting *acct;
+ int size = sizeof(struct mtk_foe_accounting);
+ u64 bytes, packets;
+
+ if (!ppe->accounting)
+ return NULL;
+
+ if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
+ return NULL;
+
+ acct = ppe->acct_table + index * size;
+
+ acct->bytes += bytes;
+ acct->packets += packets;
+
+ if (diff) {
+ diff->bytes = bytes;
+ diff->packets = packets;
+ }
+
+ return acct;
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
{
+ bool accounting = eth->soc->has_accounting;
const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_foe_accounting *acct;
struct device *dev = eth->dev;
+ struct mtk_mib_entry *mib;
struct mtk_ppe *ppe;
u32 foe_flow_size;
void *foe;
@@ -777,7 +857,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
ppe->base = base;
ppe->eth = eth;
ppe->dev = dev;
- ppe->version = version;
+ ppe->version = eth->soc->offload_version;
+ ppe->accounting = accounting;
foe = dmam_alloc_coherent(ppe->dev,
MTK_PPE_ENTRIES * soc->foe_entry_size,
@@ -793,6 +874,23 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
if (!ppe->foe_flow)
goto err_free_l2_flows;
+ if (accounting) {
+ mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
+ &ppe->mib_phys, GFP_KERNEL);
+ if (!mib)
+ return NULL;
+
+ ppe->mib_table = mib;
+
+ acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
+ GFP_KERNEL);
+
+ if (!acct)
+ return NULL;
+
+ ppe->acct_table = acct;
+ }
+
mtk_ppe_debugfs_init(ppe, index);
return ppe;
@@ -922,6 +1020,16 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
}
+
+ if (ppe->accounting && ppe->mib_phys) {
+ ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
+ MTK_PPE_MIB_CFG_EN);
+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
+ MTK_PPE_MIB_CFG_RD_CLR);
+ ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
+ MTK_PPE_MIB_CACHE_CTL_EN);
+ }
}
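Note: because MTK_PPE_MIB_CFG_RD_CLR is enabled here, the hardware counters clear on every serial read, so each read yields a delta since the previous one; mtk_foe_entry_get_mib() above folds those deltas into the software acct_table to keep running totals. A sketch of that invariant (the loop condition and poll interval are hypothetical):

	/* Sketch: with read-clear enabled, totals are the sum of deltas. */
	u64 total_bytes = 0, total_packets = 0, bytes, packets;

	while (entry_is_bound) {	/* hypothetical condition */
		if (!mtk_mib_entry_read(ppe, hash, &bytes, &packets)) {
			total_bytes += bytes;	/* delta, not absolute */
			total_packets += packets;
		}
		msleep(1000);
	}
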
int mtk_ppe_stop(struct mtk_ppe *ppe)
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 5e8bc48252b1..e1aab2e8e262 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -57,6 +57,7 @@ enum {
#define MTK_FOE_IB2_MULTICAST BIT(8)
#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_MIB_CNT BIT(15)
#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
@@ -285,16 +286,34 @@ struct mtk_flow_entry {
unsigned long cookie;
};
+struct mtk_mib_entry {
+ u32 byt_cnt_l;
+ u16 byt_cnt_h;
+ u32 pkt_cnt_l;
+ u8 pkt_cnt_h;
+ u8 _rsv0;
+ u32 _rsv1;
+} __packed;
+
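Note: the packed layout above describes one 16-byte hardware MIB record (48-bit byte count, 40-bit packet count, the rest reserved). A compile-time sketch to pin that assumption down:

	/* Sketch: guard against accidental padding or field changes. */
	static_assert(sizeof(struct mtk_mib_entry) == 16,
		      "hardware MIB records are 16 bytes");
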
+struct mtk_foe_accounting {
+ u64 bytes;
+ u64 packets;
+};
+
struct mtk_ppe {
struct mtk_eth *eth;
struct device *dev;
void __iomem *base;
int version;
char dirname[5];
+ bool accounting;
void *foe_table;
dma_addr_t foe_phys;
+ struct mtk_mib_entry *mib_table;
+ dma_addr_t mib_phys;
+
u16 foe_check_time[MTK_PPE_ENTRIES];
struct hlist_head *foe_flow;
@@ -303,8 +322,8 @@ struct mtk_ppe {
void *acct_table;
};
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
- int version, int index);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index);
+
void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
@@ -359,5 +378,7 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+ struct mtk_foe_accounting *diff);
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 391b071bcff3..53cf87e9acbb 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -82,6 +82,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
struct mtk_foe_mac_info *l2;
struct mtk_flow_addr_info ai = {};
+ struct mtk_foe_accounting *acct;
unsigned char h_source[ETH_ALEN];
unsigned char h_dest[ETH_ALEN];
int type, state;
@@ -95,6 +96,8 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
if (bind && state != MTK_FOE_STATE_BIND)
continue;
+ acct = mtk_foe_entry_get_mib(ppe, i, NULL);
+
type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
seq_printf(m, "%05x %s %7s", i,
mtk_foe_entry_state_str(state),
@@ -153,9 +156,11 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
*((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
seq_printf(m, " eth=%pM->%pM etype=%04x"
- " vlan=%d,%d ib1=%08x ib2=%08x\n",
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
h_source, h_dest, ntohs(l2->etype),
- l2->vlan1, l2->vlan2, entry->ib1, ib2);
+ l2->vlan1, l2->vlan2, entry->ib1, ib2,
+ acct ? acct->packets : 0, acct ? acct->bytes : 0);
}
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 81afd5ee3fbf..f02ccffb1e79 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -497,6 +497,7 @@ static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
+ struct mtk_foe_accounting diff;
u32 idle;
entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -507,6 +508,13 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
+ if (entry->hash != 0xffff &&
+ mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
+ &diff)) {
+ f->stats.pkts += diff.packets;
+ f->stats.bytes += diff.bytes;
+ }
+
return 0;
}
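Note: mtk_foe_entry_idle_time() reports idle seconds, so the lastused stamp handed back to flow_cls is simply "now" minus the idle time converted to jiffies; combined with the read-clear deltas above, repeated stats calls keep f->stats monotonically increasing. A trivial sketch of the conversion:

	/* Sketch: idle seconds -> lastused timestamp in jiffies. */
	static unsigned long flow_lastused(unsigned int idle_secs)
	{
		return jiffies - (unsigned long)idle_secs * HZ;
	}
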
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index 0fdb983b0a88..a2e61b3eb006 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -149,6 +149,20 @@ enum {
#define MTK_PPE_MIB_TB_BASE 0x338
+#define MTK_PPE_MIB_SER_CR 0x33C
+#define MTK_PPE_MIB_SER_CR_ST BIT(16)
+#define MTK_PPE_MIB_SER_CR_ADDR GENMASK(13, 0)
+
+#define MTK_PPE_MIB_SER_R0 0x340
+#define MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW GENMASK(31, 0)
+
+#define MTK_PPE_MIB_SER_R1 0x344
+#define MTK_PPE_MIB_SER_R1_PKT_CNT_LOW GENMASK(31, 16)
+#define MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH GENMASK(15, 0)
+
+#define MTK_PPE_MIB_SER_R2 0x348
+#define MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH GENMASK(23, 0)
+
#define MTK_PPE_MIB_CACHE_CTL 0x350
#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
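Note: these definitions describe the one-shot serial read protocol used by mtk_mib_entry_read() above: write the entry index plus the ST bit into MIB_SER_CR, poll until ST clears, then latch R0..R2. A hedged raw-register sketch of the same sequence, without the driver's ppe_w32() wrappers:

	#include <linux/bitfield.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>

	/* Sketch: one-shot MIB read against a mapped PPE region. */
	static int ppe_mib_read_raw(void __iomem *base, u16 index,
				    u32 *r0, u32 *r1, u32 *r2)
	{
		u32 val;
		int ret;

		writel(FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) |
		       MTK_PPE_MIB_SER_CR_ST, base + MTK_PPE_MIB_SER_CR);

		ret = readl_poll_timeout(base + MTK_PPE_MIB_SER_CR, val,
					 !(val & MTK_PPE_MIB_SER_CR_ST),
					 20, 1000000);
		if (ret)
			return ret;	/* hardware stayed busy */

		*r0 = readl(base + MTK_PPE_MIB_SER_R0);
		*r1 = readl(base + MTK_PPE_MIB_SER_R1);
		*r2 = readl(base + MTK_PPE_MIB_SER_R2);
		return 0;
	}
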
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
deleted file mode 100644
index 83976dc86887..000000000000
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018-2019 MediaTek Inc.
-
-/* A library for MediaTek SGMII circuit
- *
- * Author: Sean Wang <sean.wang@mediatek.com>
- *
- */
-
-#include <linux/mfd/syscon.h>
-#include <linux/of.h>
-#include <linux/phylink.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-
-static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs)
-{
- return container_of(pcs, struct mtk_pcs, pcs);
-}
-
-static void mtk_pcs_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
-{
- struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- unsigned int bm, adv;
-
- /* Read the BMSR and LPA */
- regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
- regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
-
- phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
- FIELD_GET(SGMII_LPA, adv));
-}
-
-static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertising,
- bool permit_pause_to_mac)
-{
- bool mode_changed = false, changed, use_an;
- struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- unsigned int rgc3, sgm_mode, bmcr;
- int advertise, link_timer;
-
- advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
- advertising);
- if (advertise < 0)
- return advertise;
-
- /* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
- * we assume that fixes it's speed at bitrate = line rate (in
- * other words, 1000Mbps or 2500Mbps).
- */
- if (interface == PHY_INTERFACE_MODE_SGMII) {
- sgm_mode = SGMII_IF_MODE_SGMII;
- if (phylink_autoneg_inband(mode)) {
- sgm_mode |= SGMII_REMOTE_FAULT_DIS |
- SGMII_SPEED_DUPLEX_AN;
- use_an = true;
- } else {
- use_an = false;
- }
- } else if (phylink_autoneg_inband(mode)) {
- /* 1000base-X or 2500base-X autoneg */
- sgm_mode = SGMII_REMOTE_FAULT_DIS;
- use_an = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- advertising);
- } else {
- /* 1000base-X or 2500base-X without autoneg */
- sgm_mode = 0;
- use_an = false;
- }
-
- if (use_an) {
- bmcr = SGMII_AN_ENABLE;
- } else {
- bmcr = 0;
- }
-
- if (mpcs->interface != interface) {
- link_timer = phylink_get_link_timer_ns(interface);
- if (link_timer < 0)
- return link_timer;
-
- /* PHYA power down */
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, SGMII_PHYA_PWD);
-
- /* Reset SGMII PCS state */
- regmap_update_bits(mpcs->regmap, SGMII_RESERVED_0,
- SGMII_SW_RESET, SGMII_SW_RESET);
-
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- rgc3 = RG_PHY_SPEED_3_125G;
- else
- rgc3 = 0;
-
- /* Configure the underlying interface speed */
- regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
- RG_PHY_SPEED_3_125G, rgc3);
-
- /* Setup the link timer */
- regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
-
- mpcs->interface = interface;
- mode_changed = true;
- }
-
- /* Update the advertisement, noting whether it has changed */
- regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
- SGMII_ADVERTISE, advertise, &changed);
-
- /* Update the sgmsys mode register */
- regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
- SGMII_IF_MODE_SGMII, sgm_mode);
-
- /* Update the BMCR */
- regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
- SGMII_AN_ENABLE, bmcr);
-
- /* Release PHYA power down state
- * Only removing bit SGMII_PHYA_PWD isn't enough.
- * There are cases when the SGMII_PHYA_PWD register contains 0x9 which
- * prevents SGMII from working. The SGMII still shows link but no traffic
- * can flow. Writing 0x0 to the PHYA_PWD register fix the issue. 0x0 was
- * taken from a good working state of the SGMII interface.
- * Unknown how much the QPHY needs but it is racy without a sleep.
- * Tested on mt7622 & mt7986.
- */
- usleep_range(50, 100);
- regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
-
- return changed || mode_changed;
-}
-
-static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
-{
- struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
-
- regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
- SGMII_AN_RESTART, SGMII_AN_RESTART);
-}
-
-static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
- phy_interface_t interface, int speed, int duplex)
-{
- struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
- unsigned int sgm_mode;
-
- if (!phylink_autoneg_inband(mode)) {
- /* Force the speed and duplex setting */
- if (speed == SPEED_10)
- sgm_mode = SGMII_SPEED_10;
- else if (speed == SPEED_100)
- sgm_mode = SGMII_SPEED_100;
- else
- sgm_mode = SGMII_SPEED_1000;
-
- if (duplex != DUPLEX_FULL)
- sgm_mode |= SGMII_DUPLEX_HALF;
-
- regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_DUPLEX_HALF | SGMII_SPEED_MASK,
- sgm_mode);
- }
-}
-
-static const struct phylink_pcs_ops mtk_pcs_ops = {
- .pcs_get_state = mtk_pcs_get_state,
- .pcs_config = mtk_pcs_config,
- .pcs_an_restart = mtk_pcs_restart_an,
- .pcs_link_up = mtk_pcs_link_up,
-};
-
-int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
-{
- struct device_node *np;
- int i;
-
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- np = of_parse_phandle(r, "mediatek,sgmiisys", i);
- if (!np)
- break;
-
- ss->pcs[i].ana_rgc3 = ana_rgc3;
- ss->pcs[i].regmap = syscon_node_to_regmap(np);
- of_node_put(np);
- if (IS_ERR(ss->pcs[i].regmap))
- return PTR_ERR(ss->pcs[i].regmap);
-
- ss->pcs[i].pcs.ops = &mtk_pcs_ops;
- ss->pcs[i].pcs.poll = true;
- ss->pcs[i].interface = PHY_INTERFACE_MODE_NA;
- }
-
- return 0;
-}
-
-struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id)
-{
- if (!ss->pcs[id].regmap)
- return NULL;
-
- return &ss->pcs[id].pcs;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 544e09b97483..034733b13b1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,7 +323,7 @@ struct mlx4_en_tx_ring {
struct mlx4_en_rx_desc {
/* actual number of entries depends on rx ring stride */
- struct mlx4_wqe_data_seg data[0];
+ DECLARE_FLEX_ARRAY(struct mlx4_wqe_data_seg, data);
};
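Note: DECLARE_FLEX_ARRAY() exists because a flexible array member may not be the only member of a C struct; the macro wraps it in an anonymous union/struct so the declaration stays legal while fortify/UBSAN bounds checking still recognizes the trailing array (the old data[0] idiom defeated both). A minimal sketch with hypothetical names:

	#include <linux/stddef.h>

	/* Sketch: a struct whose only member is a flexible array. */
	struct demo_ring_desc {
		DECLARE_FLEX_ARRAY(u32, slots);
	};
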
struct mlx4_en_rx_ring {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 8d4e25cc54ea..6c2f1d4a58ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -77,6 +77,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o
+mlx5_core-$(CONFIG_THERMAL) += thermal.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index c5d2fdcabd56..25d1a04ef443 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -494,6 +494,61 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
}
+static int
+mlx5_devlink_hairpin_num_queues_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ return val.vu32 ? 0 : -EINVAL;
+}
+
+static int
+mlx5_devlink_hairpin_queue_size_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 val32 = val.vu32;
+
+ if (!is_power_of_2(val32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Value is not power of two");
+ return -EINVAL;
+ }
+
+ if (val32 > BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets))) {
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack, "Maximum hairpin queue size is %lu",
+ BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets)));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_devlink_hairpin_params_init_values(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ u32 link_speed = 0;
+ u64 link_speed64;
+
+ /* set hairpin pair per each 50Gbs share of the link */
+ mlx5_port_max_linkspeed(dev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+
+ value.vu32 = link_speed64;
+ devl_param_driverinit_value_set(
+ devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, value);
+
+ value.vu32 =
+ BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(dev),
+ MLX5_CAP_GEN(dev, log_max_hairpin_num_packets)));
+ devl_param_driverinit_value_set(
+ devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, value);
+}
+
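Note: the default works out to one hairpin queue pair per 50 Gb/s share of the maximum link speed, with a floor of one: a 200 Gb/s port gets 200000 / 50000 = 4 queues, while a 25 Gb/s port is clamped up to 50000 and gets 1. A standalone sketch of that arithmetic:

	/* Sketch: default hairpin queue count from link speed in Mb/s. */
	static u32 default_hairpin_queues(u32 link_speed_mbps)
	{
		u64 speed = max_t(u32, link_speed_mbps, 50000);

		do_div(speed, 50000);
		return (u32)speed;
	}
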
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
@@ -547,6 +602,14 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
static const struct devlink_param mlx5_devlink_eth_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
+ "hairpin_num_queues", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_hairpin_num_queues_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
+ "hairpin_queue_size", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_hairpin_queue_size_validate),
};
static int mlx5_devlink_eth_params_register(struct devlink *devlink)
@@ -567,6 +630,9 @@ static int mlx5_devlink_eth_params_register(struct devlink *devlink)
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
value);
+
+ mlx5_devlink_hairpin_params_init_values(devlink);
+
return 0;
}
@@ -805,6 +871,11 @@ int mlx5_devlink_params_register(struct devlink *devlink)
{
int err;
+ /* Here only the driver init params should be registered.
+ * Runtime params should be registered by the code which
+ * behaviour they configure.
+ */
+
err = devl_params_register(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 212b12424146..5dcfb4d86d8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -12,6 +12,8 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
+ MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
+ MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 4a19ef4a9811..02237e630d13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -335,15 +335,20 @@ static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
params->mqprio.num_tc : 1;
}
+/* Keep this enum consistent with the corresponding strings array
+ * declared in en/reporter_rx.c
+ */
enum {
- MLX5E_RQ_STATE_ENABLED,
+ MLX5E_RQ_STATE_ENABLED = 0,
MLX5E_RQ_STATE_RECOVERING,
- MLX5E_RQ_STATE_AM,
+ MLX5E_RQ_STATE_DIM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */
+ MLX5E_RQ_STATE_XSK, /* set to indicate an xsk rq */
+ MLX5E_NUM_RQ_STATES, /* Must be kept last */
};
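Note: rq->state is an unsigned long bitmap indexed by these enum values, and the new MLX5E_NUM_RQ_STATES sentinel is what lets reporter_rx.c assert at compile time that its string table covers every bit. Querying a state is a plain test_bit(); e.g. the new XSK flag:

	/* Sketch: RQ state bits are tested with the bitops API. */
	static bool demo_rq_is_xsk(struct mlx5e_rq *rq)
	{
		return test_bit(MLX5E_RQ_STATE_XSK, &rq->state);
	}
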
struct mlx5e_cq {
@@ -384,16 +389,20 @@ struct mlx5e_sq_dma {
enum mlx5e_dma_map_type type;
};
+/* Keep this enum consistent with the corresponding strings array
+ * declared in en/reporter_tx.c
+ */
enum {
- MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_ENABLED = 0,
MLX5E_SQ_STATE_MPWQE,
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
- MLX5E_SQ_STATE_AM,
+ MLX5E_SQ_STATE_DIM,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
MLX5E_SQ_STATE_XDP_MULTIBUF,
+ MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
struct mlx5e_tx_mpwqe {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index a21bd1179477..561da78d3b5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -553,7 +553,7 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
u32 link_speed = 0;
u32 pci_bw = 0;
- mlx5e_port_max_linkspeed(mdev, &link_speed);
+ mlx5_port_max_linkspeed(mdev, &link_speed);
pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
link_speed, pci_bw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 505ba41195b9..dbe2b19a9570 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -32,101 +32,6 @@
#include "port.h"
-/* speed in units of 1Mb */
-static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = 1000,
- [MLX5E_1000BASE_KX] = 1000,
- [MLX5E_10GBASE_CX4] = 10000,
- [MLX5E_10GBASE_KX4] = 10000,
- [MLX5E_10GBASE_KR] = 10000,
- [MLX5E_20GBASE_KR2] = 20000,
- [MLX5E_40GBASE_CR4] = 40000,
- [MLX5E_40GBASE_KR4] = 40000,
- [MLX5E_56GBASE_R4] = 56000,
- [MLX5E_10GBASE_CR] = 10000,
- [MLX5E_10GBASE_SR] = 10000,
- [MLX5E_10GBASE_ER] = 10000,
- [MLX5E_40GBASE_SR4] = 40000,
- [MLX5E_40GBASE_LR4] = 40000,
- [MLX5E_50GBASE_SR2] = 50000,
- [MLX5E_100GBASE_CR4] = 100000,
- [MLX5E_100GBASE_SR4] = 100000,
- [MLX5E_100GBASE_KR4] = 100000,
- [MLX5E_100GBASE_LR4] = 100000,
- [MLX5E_100BASE_TX] = 100,
- [MLX5E_1000BASE_T] = 1000,
- [MLX5E_10GBASE_T] = 10000,
- [MLX5E_25GBASE_CR] = 25000,
- [MLX5E_25GBASE_KR] = 25000,
- [MLX5E_25GBASE_SR] = 25000,
- [MLX5E_50GBASE_CR2] = 50000,
- [MLX5E_50GBASE_KR2] = 50000,
-};
-
-static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
- [MLX5E_SGMII_100M] = 100,
- [MLX5E_1000BASE_X_SGMII] = 1000,
- [MLX5E_5GBASE_R] = 5000,
- [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
- [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
- [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8] = 400000,
- [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
-};
-
-bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
-{
- struct mlx5e_port_eth_proto eproto;
- int err;
-
- if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
- return true;
-
- err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
- if (err)
- return false;
-
- return !!eproto.cap;
-}
-
-static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size,
- bool force_legacy)
-{
- bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
-
- *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
- ARRAY_SIZE(mlx5e_link_speed);
- *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
-}
-
-int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
- struct mlx5e_port_eth_proto *eproto)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
-
- if (!eproto)
- return -EINVAL;
-
- err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
- if (err)
- return err;
-
- eproto->cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
- eth_proto_capability);
- eproto->admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin);
- eproto->oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
- return 0;
-}
-
void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin)
{
@@ -172,30 +77,14 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy)
-{
- unsigned long temp = eth_proto_oper;
- const u32 *table;
- u32 speed = 0;
- u32 max_size;
- int i;
-
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
- i = find_first_bit(&temp, max_size);
- if (i < max_size)
- speed = table[i];
- return speed;
-}
-
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
- struct mlx5e_port_eth_proto eproto;
+ struct mlx5_port_eth_proto eproto;
bool force_legacy = false;
bool ext;
int err;
- ext = mlx5e_ptys_ext_supported(mdev);
+ ext = mlx5_ptys_ext_supported(mdev);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
goto out;
@@ -205,7 +94,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
goto out;
}
- *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
+ *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy);
if (!(*speed))
err = -EINVAL;
@@ -213,46 +102,6 @@ out:
return err;
}
-int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
-{
- struct mlx5e_port_eth_proto eproto;
- u32 max_speed = 0;
- const u32 *table;
- u32 max_size;
- bool ext;
- int err;
- int i;
-
- ext = mlx5e_ptys_ext_supported(mdev);
- err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
- if (err)
- return err;
-
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
- for (i = 0; i < max_size; ++i)
- if (eproto.cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, table[i]);
-
- *speed = max_speed;
- return 0;
-}
-
-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy)
-{
- u32 link_modes = 0;
- const u32 *table;
- u32 max_size;
- int i;
-
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
- for (i = 0; i < max_size; ++i) {
- if (table[i] == speed)
- link_modes |= MLX5E_PROT_MASK(i);
- }
- return link_modes;
-}
-
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out)
{
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 3f474e370828..d1da225f35da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -36,25 +36,11 @@
#include <linux/mlx5/driver.h>
#include "en.h"
-struct mlx5e_port_eth_proto {
- u32 cap;
- u32 admin;
- u32 oper;
-};
-
-int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
- struct mlx5e_port_eth_proto *eproto);
void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, bool ext);
-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy);
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy);
-bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 8f7452dc00ee..19c4a83982ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -426,39 +426,58 @@ static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
return macvlan->mode == MACVLAN_MODE_PASSTHRU;
}
-static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
- struct mlx5e_rep_priv *rpriv,
- struct flow_block_offload *f,
- flow_setup_cb_t *setup_cb,
- void *data,
- void (*cleanup)(struct flow_block_cb *block_cb))
+static bool
+mlx5e_rep_check_indr_block_supported(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev,
+ struct flow_block_offload *f)
{
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- bool is_ovs_int_port = netif_is_ovs_master(netdev);
- struct mlx5e_rep_indr_block_priv *indr_priv;
- struct flow_block_cb *block_cb;
+ struct net_device *macvlan_real_dev;
- if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
- !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
- !is_ovs_int_port) {
- if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
- return -EOPNOTSUPP;
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return false;
+
+ if (mlx5e_tc_tun_device_to_offload(priv, netdev))
+ return true;
+
+ if (is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)
+ return true;
+
+ if (netif_is_macvlan(netdev)) {
if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
- return -EOPNOTSUPP;
+ return false;
}
+
+ macvlan_real_dev = macvlan_dev_real_dev(netdev);
+
+ if (macvlan_real_dev == rpriv->netdev)
+ return true;
+ if (netif_is_bond_master(macvlan_real_dev))
+ return true;
}
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
- f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- return -EOPNOTSUPP;
+ if (netif_is_ovs_master(netdev) && f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ mlx5e_tc_int_port_supported(esw))
+ return true;
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
- return -EOPNOTSUPP;
+ return false;
+}
+
+static int
+mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
+ struct mlx5e_rep_priv *rpriv,
+ struct flow_block_offload *f,
+ flow_setup_cb_t *setup_cb,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct mlx5e_rep_indr_block_priv *indr_priv;
+ struct flow_block_cb *block_cb;
- if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
+ if (!mlx5e_rep_check_indr_block_supported(rpriv, netdev, f))
return -EOPNOTSUPP;
f->unlocked_driver_cb = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index c462fe76495b..b621f735cdc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -8,6 +8,19 @@
#include "ptp.h"
#include "lib/tout.h"
+/* Keep this string array consistent with the MLX5E_RQ_STATE_* enums in en.h */
+static const char * const rq_sw_state_type_name[] = {
+ [MLX5E_RQ_STATE_ENABLED] = "enabled",
+ [MLX5E_RQ_STATE_RECOVERING] = "recovering",
+ [MLX5E_RQ_STATE_DIM] = "dim",
+ [MLX5E_RQ_STATE_NO_CSUM_COMPLETE] = "no_csum_complete",
+ [MLX5E_RQ_STATE_CSUM_FULL] = "csum_full",
+ [MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX] = "mini_cqe_hw_stridx",
+ [MLX5E_RQ_STATE_SHAMPO] = "shampo",
+ [MLX5E_RQ_STATE_MINI_CQE_ENHANCED] = "mini_cqe_enhanced",
+ [MLX5E_RQ_STATE_XSK] = "xsk",
+};
+
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
@@ -239,6 +252,35 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
+static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
+{
+ int err;
+ int i;
+
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES,
+ "rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h");
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i) {
+ err = devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i],
+ test_bit(i, &rq->state));
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_end(fmsg);
+}
+
static int
mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg)
@@ -265,10 +307,6 @@ mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
if (err)
return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state);
- if (err)
- return err;
-
err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
if (err)
return err;
@@ -281,6 +319,10 @@ mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
if (err)
return err;
+ err = mlx5e_health_rq_put_sw_state(fmsg, rq);
+ if (err)
+ return err;
+
err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 34666e2b3871..44c1926843a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -6,6 +6,19 @@
#include "en/devlink.h"
#include "lib/tout.h"
+/* Keep this string array consistent with the MLX5E_SQ_STATE_* enums in en.h */
+static const char * const sq_sw_state_type_name[] = {
+ [MLX5E_SQ_STATE_ENABLED] = "enabled",
+ [MLX5E_SQ_STATE_MPWQE] = "mpwqe",
+ [MLX5E_SQ_STATE_RECOVERING] = "recovering",
+ [MLX5E_SQ_STATE_IPSEC] = "ipsec",
+ [MLX5E_SQ_STATE_DIM] = "dim",
+ [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
+ [MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
+ [MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
+ [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
+};
+
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
struct mlx5_core_dev *dev = sq->mdev;
@@ -37,6 +50,35 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
sq->pc = 0;
}
+static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
+{
+ int err;
+ int i;
+
+ BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
+ "sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) {
+ err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
+ test_bit(i, &sq->state));
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_end(fmsg);
+}
+
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
struct mlx5_core_dev *mdev;
@@ -190,6 +232,10 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
if (err)
return err;
+ err = mlx5e_health_sq_put_sw_state(fmsg, sq);
+ if (err)
+ return err;
+
err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 07cc65596f89..291193f7120d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -234,6 +234,9 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
if (mlx5_lag_mpesw_do_mirred(priv->mdev, out_dev, extack))
return -EOPNOTSUPP;
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
return -ENODEV;
@@ -250,9 +253,6 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
return err;
}
- if (netif_is_macvlan(out_dev))
- out_dev = macvlan_dev_real_dev(out_dev);
-
err = verify_uplink_forwarding(priv, attr, out_dev, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index b38f693bbb52..92065568bb19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -115,6 +115,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b);
+bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b,
+ __be16 tun_flags);
#endif /* CONFIG_MLX5_ESWITCH */
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 780224fd67a1..a108e73c9f66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -3,6 +3,7 @@
#include <net/fib_notifier.h>
#include <net/nexthop.h>
+#include <net/ip_tunnels.h>
#include "tc_tun_encap.h"
#include "en_tc.h"
#include "tc_tun.h"
@@ -571,6 +572,37 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
}
+bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b,
+ __be16 tun_flags)
+{
+ struct ip_tunnel_info *a_info;
+ struct ip_tunnel_info *b_info;
+ bool a_has_opts, b_has_opts;
+
+ if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+ return false;
+
+ a_has_opts = !!(a->ip_tun_key->tun_flags & tun_flags);
+ b_has_opts = !!(b->ip_tun_key->tun_flags & tun_flags);
+
+ /* keys are equal when both don't have any options attached */
+ if (!a_has_opts && !b_has_opts)
+ return true;
+
+ if (a_has_opts != b_has_opts)
+ return false;
+
+ /* options stored in memory next to ip_tunnel_info struct */
+ a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+ b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+ return a_info->options_len == b_info->options_len &&
+ !memcmp(ip_tunnel_info_opts(a_info),
+ ip_tunnel_info_opts(b_info),
+ a_info->options_len);
+}
+
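Note: the container_of() step is valid because struct ip_tunnel_info embeds the key, and tunnel options live in memory immediately after the ip_tunnel_info itself, which is what ip_tunnel_info_opts() returns; going through the accessor also replaces the bare "a_info + 1" arithmetic the old geneve-only copy relied on. A minimal sketch of the recovery:

	#include <net/ip_tunnels.h>

	/* Sketch: recover the owning ip_tunnel_info, then its options. */
	static void *demo_tun_opts(struct ip_tunnel_key *key)
	{
		struct ip_tunnel_info *info =
			container_of(key, struct ip_tunnel_info, key);

		return ip_tunnel_info_opts(info);
	}
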
static int cmp_decap_info(struct mlx5e_decap_key *a,
struct mlx5e_decap_key *b)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 054d80c4e65c..2bcd10b6d653 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -337,29 +337,7 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
- struct ip_tunnel_info *a_info;
- struct ip_tunnel_info *b_info;
- bool a_has_opts, b_has_opts;
-
- if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
- return false;
-
- a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
- b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
-
- /* keys are equal when both don't have any options attached */
- if (!a_has_opts && !b_has_opts)
- return true;
-
- if (a_has_opts != b_has_opts)
- return false;
-
- /* geneve options stored in memory next to ip_tunnel_info struct */
- a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
- b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
-
- return a_info->options_len == b_info->options_len &&
- memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+ return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_GENEVE_OPT);
}
struct mlx5e_tc_tunnel geneve_tunnel = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 1f62c702b625..a184d739d5f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies. */
+#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"
@@ -86,9 +87,11 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
struct udphdr *udp = (struct udphdr *)(buf);
+ const struct vxlan_metadata *md;
struct vxlanhdr *vxh;
- if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
+ if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) &&
+ e->tun_info->options_len != sizeof(*md))
return -EOPNOTSUPP;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
*ip_proto = IPPROTO_UDP;
@@ -96,6 +99,57 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
udp->dest = tun_key->tp_dst;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(tun_id);
+ if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) {
+ md = ip_tunnel_info_opts(e->tun_info);
+ vxlan_build_gbp_hdr(vxh, md);
+ }
+
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_enc_opts enc_opts;
+ void *misc5_c, *misc5_v;
+ u32 *gbp, *gbp_mask;
+
+ flow_rule_match_enc_opts(rule, &enc_opts);
+
+ if (memchr_inv(&enc_opts.mask->data, 0, sizeof(enc_opts.mask->data)) &&
+ !MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(priv->mdev, tunnel_header_0_1)) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on VxLAN GBP is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) {
+ NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP");
+ return -EOPNOTSUPP;
+ }
+
+ if (enc_opts.key->len != sizeof(*gbp) ||
+ enc_opts.mask->len != sizeof(*gbp_mask)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN GBP option/mask len is not 32 bits");
+ return -EINVAL;
+ }
+
+ gbp = (u32 *)&enc_opts.key->data[0];
+ gbp_mask = (u32 *)&enc_opts.mask->data[0];
+
+ if (*gbp_mask & ~VXLAN_GBP_MASK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask (0x%08X)", *gbp_mask);
+ return -EINVAL;
+ }
+
+ misc5_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_5);
+ misc5_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_5);
+ MLX5_SET(fte_match_set_misc5, misc5_c, tunnel_header_0, *gbp_mask);
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0, *gbp);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
return 0;
}
@@ -122,6 +176,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
if (!enc_keyid.mask->keyid)
return 0;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ int err;
+
+ err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
+ if (err)
+ return err;
+ }
+
/* match on VNI is required */
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
@@ -143,6 +205,12 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return 0;
}
+static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a,
+ struct mlx5e_encap_key *b)
+{
+ return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT);
+}
+
static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
{
const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
@@ -160,6 +228,6 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
.generate_ip_tun_hdr = mlx5e_gen_ip_tunnel_header_vxlan,
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
- .encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
+ .encap_info_equal = mlx5e_tc_tun_encap_info_equal_vxlan,
.get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index b9c2f67d3794..816ea83e6413 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -86,7 +86,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{
- return (u16)(*fifo->pc - *fifo->cc) < fifo->mask;
+ return (u16)(*fifo->pc - *fifo->cc) <= fifo->mask;
}
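Note: the fifo holds mask + 1 entries (sizes are powers of two, mask = size - 1) and *pc - *cc is the in-flight count, so there is room whenever that count is <= mask; the old < test needlessly treated a fifo with exactly one free slot as full (size 8, mask 7: pc - cc == 7 still leaves a slot, which the old check rejected). A sketch of the invariant:

	/* Sketch: occupancy math for a power-of-two skb fifo; wrapping
	 * u16 subtraction keeps the count valid across counter overflow.
	 */
	static bool demo_fifo_has_room(u16 pc, u16 cc, u16 mask)
	{
		u16 in_flight = pc - cc;	/* 0 .. mask + 1 */

		return in_flight <= mask;	/* room iff a slot is free */
	}
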
static inline bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 81a567e17264..ed279f450976 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -93,13 +93,19 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk)
{
+ struct mlx5e_rq *xskrq = &c->xskrq;
int err;
- err = mlx5e_init_xsk_rq(c, params, pool, xsk, &c->xskrq);
+ err = mlx5e_init_xsk_rq(c, params, pool, xsk, xskrq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), &c->xskrq);
+ err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), xskrq);
+ if (err)
+ return err;
+
+ __set_bit(MLX5E_RQ_STATE_XSK, &xskrq->state);
+ return 0;
}
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 7b0d3de0ec6c..91fa0a366316 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -308,6 +308,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
struct net_device *netdev = x->xso.real_dev;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
+ gfp_t gfp;
int err;
priv = netdev_priv(netdev);
@@ -315,16 +316,20 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
return -EOPNOTSUPP;
ipsec = priv->ipsec;
- err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
- if (err)
- return err;
-
- sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
+ sa_entry = kzalloc(sizeof(*sa_entry), gfp);
if (!sa_entry)
return -ENOMEM;
sa_entry->x = x;
sa_entry->ipsec = ipsec;
+ /* Check if this SA is originated from acquire flow temporary SA */
+ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+ goto out;
+
+ err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
+ if (err)
+ goto err_xfrm;
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -353,6 +358,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
+out:
x->xso.offload_handle = (unsigned long)sa_entry;
return 0;
@@ -372,6 +378,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
+ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+ return;
+
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
}
@@ -380,9 +389,13 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
+ goto sa_entry_free;
+
cancel_work_sync(&sa_entry->modify_work.work);
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
+sa_entry_free:
kfree(sa_entry);
}
@@ -482,26 +495,26 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- int err;
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ u64 packets, bytes, lastuse;
- lockdep_assert_held(&x->lock);
+ lockdep_assert(lockdep_is_held(&x->lock) ||
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
- if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
- /* Limits are not configured, as soft limit
- * must be lowever than hard limit.
- */
+ if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
- err = mlx5e_ipsec_aso_query(sa_entry, NULL);
- if (err)
- return;
-
- mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
+ mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
+ x->curlft.packets += packets;
+ x->curlft.bytes += bytes;
}
-static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
+static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
+ struct xfrm_policy *x,
struct netlink_ext_ack *extack)
{
+ struct xfrm_selector *sel = &x->selector;
+
if (x->type != XFRM_POLICY_TYPE_MAIN) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
return -EINVAL;
@@ -519,8 +532,9 @@ static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
return -EINVAL;
}
- if (!x->xfrm_vec[0].reqid) {
- NL_SET_ERR_MSG_MOD(extack, "Cannot offload policy without reqid");
+ if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
+ addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
return -EINVAL;
}
@@ -529,12 +543,24 @@ static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
return -EINVAL;
}
- if (x->selector.proto != IPPROTO_IP &&
- (x->selector.proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
+ if (sel->proto != IPPROTO_IP &&
+ (sel->proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
return -EINVAL;
}
+ if (x->priority) {
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
+ return -EINVAL;
+ }
+
+ if (x->priority == U32_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -560,6 +586,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
attrs->upspec.sport = ntohs(sel->sport);
attrs->upspec.sport_mask = ntohs(sel->sport_mask);
attrs->upspec.proto = sel->proto;
+ attrs->prio = x->priority;
}
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
@@ -576,7 +603,7 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
return -EOPNOTSUPP;
}
- err = mlx5e_xfrm_validate_policy(x, extack);
+ err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 12f044330639..68ae5230eb75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -94,6 +94,7 @@ enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_ESN = 1 << 1,
MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
MLX5_IPSEC_CAP_ROCE = 1 << 3,
+ MLX5_IPSEC_CAP_PRIO = 1 << 4,
};
struct mlx5e_priv;
@@ -161,6 +162,7 @@ struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_fc *fc;
};
struct mlx5e_ipsec_modify_state_work {
@@ -198,6 +200,7 @@ struct mlx5_accel_pol_xfrm_attrs {
u8 type : 2;
u8 dir : 2;
u32 reqid;
+ u32 prio;
};
struct mlx5e_ipsec_pol_entry {
@@ -233,9 +236,6 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data);
-void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
- u64 *packets);
-
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
void *ipsec_stats);
@@ -252,6 +252,13 @@ mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
return pol_entry->ipsec->mdev;
}
+
+static inline bool addr6_all_zero(__be32 *addr6)
+{
+ static const __be32 zaddr6[4] = {};
+
+ return !memcmp(addr6, zaddr6, sizeof(zaddr6));
+}
#else
static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 9871ba1b25ff..0539640a4d88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -7,6 +7,7 @@
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
+#include "lib/fs_chains.h"
#define NUM_IPSEC_FTE BIT(15)
@@ -34,13 +35,16 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_miss sa;
struct mlx5e_ipsec_rule status;
struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
};
struct mlx5e_ipsec_tx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_rule status;
struct mlx5_flow_namespace *ns;
struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
};
/* IPsec RX flow steering */
@@ -51,6 +55,67 @@ static enum mlx5_traffic_types family2tt(u32 family)
return MLX5_TT_IPV6_IPSEC_ESP;
}
+static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family)
+{
+ if (family == AF_INET)
+ return ipsec->rx_ipv4;
+
+ return ipsec->rx_ipv6;
+}
+
+static struct mlx5_fs_chains *
+ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
+ enum mlx5_flow_namespace_type ns, int base_prio,
+ int base_level, struct mlx5_flow_table **root_ft)
+{
+ struct mlx5_chains_attr attr = {};
+ struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+ attr.max_grp_num = 2;
+ attr.default_ft = miss_ft;
+ attr.ns = ns;
+ attr.fs_base_prio = base_prio;
+ attr.fs_base_level = base_level;
+ chains = mlx5_chains_create(mdev, &attr);
+ if (IS_ERR(chains))
+ return chains;
+
+ /* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
+ ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_chains_get;
+ }
+
+ *root_ft = ft;
+ return chains;
+
+err_chains_get:
+ mlx5_chains_destroy(chains);
+ return ERR_PTR(err);
+}
+
+static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
+{
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_chains_destroy(chains);
+}
+
+static struct mlx5_flow_table *
+ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
+{
+ return mlx5_chains_get_table(chains, 0, prio + 1, 0);
+}
+
+static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
+{
+ mlx5_chains_put_table(chains, 0, prio + 1, 0);
+}
+
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
int max_num_groups)
@@ -170,9 +235,18 @@ out:
static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
- mlx5_del_flow_rules(rx->pol.rule);
- mlx5_destroy_flow_group(rx->pol.group);
- mlx5_destroy_flow_table(rx->ft.pol);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+
+ /* disconnect */
+ mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
+
+ if (rx->chains) {
+ ipsec_chains_destroy(rx->chains);
+ } else {
+ mlx5_del_flow_rules(rx->pol.rule);
+ mlx5_destroy_flow_group(rx->pol.group);
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
@@ -238,6 +312,20 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
goto err_fs;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
+ MLX5_FLOW_NAMESPACE_KERNEL,
+ MLX5E_NIC_PRIO,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
+ &rx->ft.pol);
+ if (IS_ERR(rx->chains)) {
+ err = PTR_ERR(rx->chains);
+ goto err_pol_ft;
+ }
+
+ goto connect;
+ }
+
ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
2);
if (IS_ERR(ft)) {
@@ -252,6 +340,12 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
goto err_pol_miss;
+connect:
+ /* connect */
+ memset(dest, 0x00, sizeof(*dest));
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.pol;
+ mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest[0]);
return 0;
err_pol_miss:
@@ -271,69 +365,147 @@ err_fs_ft_status:
return err;
}
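Read together with rx_destroy() above, rx_create() now assembles the RX pipeline as: the TTC entry for the family's ESP traffic type forwards into the policy stage (a plain policy table, or an fs_chains instance when MLX5_IPSEC_CAP_PRIO is set), whose miss path falls through to the SA table, whose decrypt rules in turn forward to the status table. Roughly:

	ttc[ESP] -> ft.pol / chains -> (miss) ft.sa -> (decrypt) ft.status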
-static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec *ipsec, u32 family)
+static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
- struct mlx5_flow_destination dest = {};
- struct mlx5e_ipsec_rx *rx;
- int err = 0;
-
- if (family == AF_INET)
- rx = ipsec->rx_ipv4;
- else
- rx = ipsec->rx_ipv6;
+ int err;
- mutex_lock(&rx->ft.mutex);
if (rx->ft.refcnt)
goto skip;
- /* create FT */
err = rx_create(mdev, ipsec, rx, family);
if (err)
- goto out;
-
- /* connect */
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.pol;
- mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);
+ return err;
skip:
rx->ft.refcnt++;
-out:
+ return 0;
+}
+
+static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
+ u32 family)
+{
+ if (--rx->ft.refcnt)
+ return;
+
+ rx_destroy(ipsec->mdev, ipsec, rx, family);
+}
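rx_get()/rx_put() factor the old get/put bodies out from under the lock so the policy-table variants below can compose them while holding the same rx->ft.mutex. The shape is the usual lazily-created, refcounted singleton; a short sketch of the pattern (illustrative names, not driver API; res_create()/res_destroy() are hypothetical stand-ins for rx_create()/rx_destroy()):

	struct res {
		struct mutex lock;	/* plays the role of rx->ft.mutex */
		u32 refcnt;
	};

	/* Callers hold r->lock. */
	static int res_get_locked(struct res *r)
	{
		int err;

		if (r->refcnt)
			goto skip;

		err = res_create(r);
		if (err)
			return err;
	skip:
		r->refcnt++;
		return 0;
	}

	static void res_put_locked(struct res *r)
	{
		if (--r->refcnt)
			return;

		res_destroy(r);
	}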
+
+static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec, u32 family)
+{
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+ int err;
+
+ mutex_lock(&rx->ft.mutex);
+ err = rx_get(mdev, ipsec, rx, family);
mutex_unlock(&rx->ft.mutex);
if (err)
return ERR_PTR(err);
+
return rx;
}
-static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- u32 family)
+static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec,
+ u32 family, u32 prio)
{
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
- struct mlx5e_ipsec_rx *rx;
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+ struct mlx5_flow_table *ft;
+ int err;
- if (family == AF_INET)
- rx = ipsec->rx_ipv4;
- else
- rx = ipsec->rx_ipv6;
+ mutex_lock(&rx->ft.mutex);
+ err = rx_get(mdev, ipsec, rx, family);
+ if (err)
+ goto err_get;
+
+ ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_get_ft;
+ }
+
+ mutex_unlock(&rx->ft.mutex);
+ return ft;
+
+err_get_ft:
+ rx_put(ipsec, rx, family);
+err_get:
+ mutex_unlock(&rx->ft.mutex);
+ return ERR_PTR(err);
+}
+
+static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family)
+{
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
mutex_lock(&rx->ft.mutex);
- rx->ft.refcnt--;
- if (rx->ft.refcnt)
- goto out;
+ rx_put(ipsec, rx, family);
+ mutex_unlock(&rx->ft.mutex);
+}
- /* disconnect */
- mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
+static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio)
+{
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
- /* remove FT */
- rx_destroy(mdev, ipsec, rx, family);
+ mutex_lock(&rx->ft.mutex);
+ if (rx->chains)
+ ipsec_chains_put_table(rx->chains, prio);
-out:
+ rx_put(ipsec, rx, family);
mutex_unlock(&rx->ft.mutex);
}
+static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *fte;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
+ goto err_rule;
+ }
+
+ kvfree(spec);
+ tx->status.rule = fte;
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
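The status table thus ends in a catch-all ALLOW + COUNT rule charging tx->fc->cnt, replacing the per-SA ALLOW that used to terminate TX rules (see the tx_add_rule() hunk below). Should the totals be needed, mlx5_fc_query() is the flow-counter accessor; a hedged sketch (locking and capability checks elided):

	u64 packets, bytes;

	if (!mlx5_fc_query(mdev, tx->fc->cnt, &packets, &bytes))
		mlx5_core_dbg(mdev, "ipsec tx status: %llu pkts, %llu bytes\n",
			      packets, bytes);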
+
/* IPsec TX flow steering */
+static void tx_destroy(struct mlx5e_ipsec_tx *tx, struct mlx5_ipsec_fs *roce)
+{
+ mlx5_ipsec_fs_roce_tx_destroy(roce);
+ if (tx->chains) {
+ ipsec_chains_destroy(tx->chains);
+ } else {
+ mlx5_del_flow_rules(tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
+ mlx5_destroy_flow_table(tx->ft.pol);
+ }
+
+ mlx5_destroy_flow_table(tx->ft.sa);
+ mlx5_del_flow_rules(tx->status.rule);
+ mlx5_destroy_flow_table(tx->ft.status);
+}
+
static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
struct mlx5_ipsec_fs *roce)
{
@@ -341,12 +513,34 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
struct mlx5_flow_table *ft;
int err;
- ft = ipsec_ft_create(tx->ns, 1, 0, 4);
+ ft = ipsec_ft_create(tx->ns, 2, 0, 1);
if (IS_ERR(ft))
return PTR_ERR(ft);
+ tx->ft.status = ft;
+ err = ipsec_counter_rule_tx(mdev, tx);
+ if (err)
+ goto err_status_rule;
+
+ ft = ipsec_ft_create(tx->ns, 1, 0, 4);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_sa_ft;
+ }
tx->ft.sa = ft;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ tx->chains = ipsec_chains_create(
+ mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 0,
+ &tx->ft.pol);
+ if (IS_ERR(tx->chains)) {
+ err = PTR_ERR(tx->chains);
+ goto err_pol_ft;
+ }
+
+ goto connect_roce;
+ }
+
ft = ipsec_ft_create(tx->ns, 0, 0, 2);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
@@ -356,44 +550,100 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = tx->ft.sa;
err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
- if (err)
- goto err_pol_miss;
+ if (err) {
+ mlx5_destroy_flow_table(tx->ft.pol);
+ goto err_pol_ft;
+ }
+connect_roce:
err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
if (err)
goto err_roce;
return 0;
err_roce:
- mlx5_del_flow_rules(tx->pol.rule);
- mlx5_destroy_flow_group(tx->pol.group);
-err_pol_miss:
- mlx5_destroy_flow_table(tx->ft.pol);
+ if (tx->chains) {
+ ipsec_chains_destroy(tx->chains);
+ } else {
+ mlx5_del_flow_rules(tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
+ mlx5_destroy_flow_table(tx->ft.pol);
+ }
err_pol_ft:
mlx5_destroy_flow_table(tx->ft.sa);
+err_sa_ft:
+ mlx5_del_flow_rules(tx->status.rule);
+err_status_rule:
+ mlx5_destroy_flow_table(tx->ft.status);
return err;
}
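With this rework tx_create() builds three tables instead of two: the policy stage at level 0 (or an fs_chains instance under MLX5_IPSEC_CAP_PRIO), the SA table at level 1, and the new status table at level 2 holding the counter rule from ipsec_counter_rule_tx(); the KERNEL_TX_IPSEC_NUM_LEVELS bump in the fs_core.c hunk below accounts for the extra level. Roughly:

	ft.pol / chains -> ft.sa -> ft.status (allow + count)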
-static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec *ipsec)
+static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx *tx)
{
- struct mlx5e_ipsec_tx *tx = ipsec->tx;
- int err = 0;
+ int err;
- mutex_lock(&tx->ft.mutex);
if (tx->ft.refcnt)
goto skip;
err = tx_create(mdev, tx, ipsec->roce);
if (err)
- goto out;
+ return err;
skip:
tx->ft.refcnt++;
-out:
+ return 0;
+}
+
+static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
+{
+ if (--tx->ft.refcnt)
+ return;
+
+ tx_destroy(tx, ipsec->roce);
+}
+
+static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec,
+ u32 prio)
+{
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ mutex_lock(&tx->ft.mutex);
+ err = tx_get(mdev, ipsec, tx);
+ if (err)
+ goto err_get;
+
+ ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_get_ft;
+ }
+
+ mutex_unlock(&tx->ft.mutex);
+ return ft;
+
+err_get_ft:
+ tx_put(ipsec, tx);
+err_get:
+ mutex_unlock(&tx->ft.mutex);
+ return ERR_PTR(err);
+}
+
+static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ int err;
+
+ mutex_lock(&tx->ft.mutex);
+ err = tx_get(mdev, ipsec, tx);
mutex_unlock(&tx->ft.mutex);
if (err)
return ERR_PTR(err);
+
return tx;
}
@@ -402,53 +652,72 @@ static void tx_ft_put(struct mlx5e_ipsec *ipsec)
struct mlx5e_ipsec_tx *tx = ipsec->tx;
mutex_lock(&tx->ft.mutex);
- tx->ft.refcnt--;
- if (tx->ft.refcnt)
- goto out;
+ tx_put(ipsec, tx);
+ mutex_unlock(&tx->ft.mutex);
+}
- mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce);
- mlx5_del_flow_rules(tx->pol.rule);
- mlx5_destroy_flow_group(tx->pol.group);
- mlx5_destroy_flow_table(tx->ft.pol);
- mlx5_destroy_flow_table(tx->ft.sa);
-out:
+static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
+{
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+
+ mutex_lock(&tx->ft.mutex);
+ if (tx->chains)
+ ipsec_chains_put_table(tx->chains, prio);
+
+ tx_put(ipsec, tx);
mutex_unlock(&tx->ft.mutex);
}
static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
__be32 *daddr)
{
+ if (!*saddr && !*daddr)
+ return;
+
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ if (*saddr) {
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ }
+
+ if (*daddr) {
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ }
}
static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
__be32 *daddr)
{
+ if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
+ return;
+
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ if (!addr6_all_zero(saddr)) {
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ }
+
+ if (!addr6_all_zero(daddr)) {
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ }
}
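Both setters now treat an all-zero address as a wildcard: if saddr and daddr are both zero, no outer-header match is installed at all; otherwise only the non-zero side adds match criteria, so a policy can match one endpoint while leaving the other open. For instance (illustrative values):

	__be32 any = 0, dst = htonl(0xc0000201);	/* 192.0.2.1 */

	setup_fte_addr4(spec, &any, &dst);	/* match on dst only; any source */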
static void setup_fte_esp(struct mlx5_flow_spec *spec)
@@ -607,11 +876,12 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2];
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_ipsec_rx *rx;
+ struct mlx5_fc *counter;
int err;
rx = rx_ft_get(mdev, ipsec, attrs->family);
@@ -648,14 +918,22 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
break;
}
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_add_cnt;
+ }
flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
flow_act.flags |= FLOW_ACT_NO_APPEND;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.status;
- rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.status;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(counter);
+ rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
@@ -665,10 +943,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
sa_entry->ipsec_rule.rule = rule;
sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
+ sa_entry->ipsec_rule.fc = counter;
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
err_add_flow:
+ mlx5_fc_destroy(mdev, counter);
+err_add_cnt:
if (flow_act.pkt_reformat)
mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
@@ -676,7 +957,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(mdev, ipsec, attrs->family);
+ rx_ft_put(ipsec, attrs->family);
return err;
}
@@ -685,12 +966,13 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
- struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_destination dest[2];
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_ipsec_tx *tx;
- int err = 0;
+ struct mlx5_fc *counter;
+ int err;
tx = tx_ft_get(mdev, ipsec);
if (IS_ERR(tx))
@@ -717,7 +999,8 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- setup_fte_reg_c0(spec, attrs->reqid);
+ if (attrs->reqid)
+ setup_fte_reg_c0(spec, attrs->reqid);
err = setup_pkt_reformat(mdev, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
@@ -726,15 +1009,23 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
break;
}
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_add_cnt;
+ }
+
flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
- rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1);
+ dest[0].ft = tx->ft.status;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(counter);
+ rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
@@ -743,10 +1034,13 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
+ sa_entry->ipsec_rule.fc = counter;
sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
return 0;
err_add_flow:
+ mlx5_fc_destroy(mdev, counter);
+err_add_cnt:
if (flow_act.pkt_reformat)
mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
@@ -760,16 +1054,17 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+ struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct mlx5e_ipsec_tx *tx;
+ struct mlx5_flow_table *ft;
int err, dstn = 0;
- tx = tx_ft_get(mdev, pol_entry->ipsec);
- if (IS_ERR(tx))
- return PTR_ERR(tx);
+ ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -785,10 +1080,12 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
- err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
- &flow_act);
- if (err)
- goto err_mod_header;
+ if (attrs->reqid) {
+ err = setup_modify_header(mdev, attrs->reqid,
+ XFRM_DEV_OFFLOAD_OUT, &flow_act);
+ if (err)
+ goto err_mod_header;
+ }
switch (attrs->action) {
case XFRM_POLICY_ALLOW:
@@ -811,7 +1108,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
dest[dstn].ft = tx->ft.sa;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dstn++;
- rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
@@ -824,11 +1121,12 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
return 0;
err_action:
- mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+ if (attrs->reqid)
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
kvfree(spec);
err_alloc:
- tx_ft_put(pol_entry->ipsec);
+ tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
return err;
}
@@ -840,12 +1138,15 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
- if (IS_ERR(rx))
- return PTR_ERR(rx);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ rx = ipsec_rx(pol_entry->ipsec, attrs->family);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -880,7 +1181,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dstn].ft = rx->ft.sa;
dstn++;
- rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
@@ -894,7 +1195,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put(mdev, pol_entry->ipsec, attrs->family);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio);
return err;
}
@@ -1022,7 +1323,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_del_flow_rules(ipsec_rule->rule);
-
+ mlx5_fc_destroy(mdev, ipsec_rule->fc);
if (ipsec_rule->pkt_reformat)
mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
@@ -1032,7 +1333,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
}
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
- rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -1051,12 +1352,15 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5_del_flow_rules(ipsec_rule->rule);
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
+ rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
+ pol_entry->attrs.prio);
return;
}
- mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
- tx_ft_put(pol_entry->ipsec);
+ if (ipsec_rule->modify_hdr)
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+ tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
}
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 5fa7a4c40429..5342b0b07681 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -36,11 +36,18 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
- if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
- MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
- caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+ reformat_add_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ reformat_del_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+ caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
+ caps |= MLX5_IPSEC_CAP_PRIO;
+ }
if (mlx5_get_roce_state(mdev) &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
@@ -482,18 +489,3 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
spin_unlock_bh(&aso->lock);
return ret;
}
-
-void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
- u64 *packets)
-{
- struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
- struct mlx5e_ipsec_aso *aso = ipsec->aso;
- u64 hard_cnt;
-
- hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
- /* HW decreases the limit till it reaches zero to fire an event.
- * We need to fix the calculations, so the returned count is a total
- * number of passed packets and not how many are left.
- */
- *packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 79fd21ecb9cb..1f5a2110d31f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -220,7 +220,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = mlx5e_ptys_ext_supported(mdev);
+ bool ext = mlx5_ptys_ext_supported(mdev);
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
@@ -895,7 +895,7 @@ static void get_speed_duplex(struct net_device *netdev,
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
+ speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
if (data_rate_oper)
speed = 100 * data_rate_oper;
@@ -980,7 +980,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
- bool ext = mlx5e_ptys_ext_supported(mdev);
+ bool ext = mlx5_ptys_ext_supported(mdev);
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
@@ -1160,7 +1160,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_port_eth_proto eproto;
+ struct mlx5_port_eth_proto eproto;
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
@@ -1180,7 +1180,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
autoneg = link_ksettings->base.autoneg;
speed = link_ksettings->base.speed;
- ext_supported = mlx5e_ptys_ext_supported(mdev);
+ ext_supported = mlx5_ptys_ext_supported(mdev);
ext = ext_requested(autoneg, adver, ext_supported);
if (!ext_supported && ext)
return -EOPNOTSUPP;
@@ -1194,7 +1194,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
- mlx5e_port_speed2linkmodes(mdev, speed, !ext);
+ mlx5_port_speed2linkmodes(mdev, speed, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7ca7e9b57607..90eac1aa5f6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1188,7 +1188,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
if (params->rx_dim_enabled)
- __set_bit(MLX5E_RQ_STATE_AM, &rq->state);
+ __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
/* We disable csum_complete when XDP is enabled since
* XDP programs might manipulate packets which will render
@@ -1664,7 +1664,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
if (params->tx_dim_enabled)
- sq->state |= BIT(MLX5E_SQ_STATE_AM);
+ sq->state |= BIT(MLX5E_SQ_STATE_DIM);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 87a2850b32d0..083ce31f95b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -44,6 +44,7 @@
#include <net/bareudp.h>
#include <net/bonding.h>
#include <net/dst_metadata.h>
+#include "devlink.h"
#include "en.h"
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
@@ -73,12 +74,6 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
-struct mlx5e_hairpin_params {
- struct mlx5_core_dev *mdev;
- u32 num_queues;
- u32 queue_size;
-};
-
struct mlx5e_tc_table {
/* Protects the dynamic assignment of the t parameter
* which is the nic tc root table.
@@ -101,7 +96,6 @@ struct mlx5e_tc_table {
struct mlx5_tc_ct_priv *ct;
struct mapping_ctx *mapping;
- struct mlx5e_hairpin_params hairpin_params;
struct dentry *dfs_root;
/* tc action stats */
@@ -589,6 +583,7 @@ struct mlx5e_hairpin {
struct mlx5e_tir direct_tir;
int num_channels;
+ u8 log_num_packets;
struct mlx5e_rqt indir_rqt;
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5_ttc_table *ttc;
@@ -935,6 +930,7 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
hp->func_mdev = func_mdev;
hp->func_priv = priv;
hp->num_channels = params->num_channels;
+ hp->log_num_packets = params->log_num_packets;
err = mlx5e_hairpin_create_transport(hp);
if (err)
@@ -1076,9 +1072,11 @@ static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
mutex_lock(&tc->hairpin_tbl_lock);
hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
- seq_printf(file, "Hairpin peer_vhca_id %u prio %u refcnt %u\n",
+ seq_printf(file,
+ "Hairpin peer_vhca_id %u prio %u refcnt %u num_channels %u num_packets %lu\n",
hpe->peer_vhca_id, hpe->prio,
- refcount_read(&hpe->refcnt));
+ refcount_read(&hpe->refcnt), hpe->hp->num_channels,
+ BIT(hpe->hp->log_num_packets));
mutex_unlock(&tc->hairpin_tbl_lock);
return 0;
@@ -1099,33 +1097,15 @@ static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
&debugfs_hairpin_table_dump_fops);
}
-static void
-mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
- struct mlx5_core_dev *mdev)
-{
- u32 link_speed = 0;
- u64 link_speed64;
-
- hairpin_params->mdev = mdev;
- /* set hairpin pair per each 50Gbs share of the link */
- mlx5e_port_max_linkspeed(mdev, &link_speed);
- link_speed = max_t(u32, link_speed, 50000);
- link_speed64 = link_speed;
- do_div(link_speed64, 50000);
- hairpin_params->num_queues = link_speed64;
-
- hairpin_params->queue_size =
- BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev),
- MLX5_CAP_GEN(mdev, log_max_hairpin_num_packets)));
-}
-
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
int peer_ifindex = parse_attr->mirred_ifindex[0];
+ union devlink_param_value val = {};
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
struct mlx5e_hairpin_entry *hpe;
@@ -1182,7 +1162,14 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&tc->hairpin_tbl_lock);
- params.log_num_packets = ilog2(tc->hairpin_params.queue_size);
+ err = devl_param_driverinit_value_get(
+ devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, &val);
+ if (err) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ params.log_num_packets = ilog2(val.vu32);
params.log_data_size =
clamp_t(u32,
params.log_num_packets +
@@ -1191,7 +1178,14 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.q_counter = priv->q_counter;
- params.num_channels = tc->hairpin_params.num_queues;
+ err = devl_param_driverinit_value_get(
+ devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
+ if (err) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ params.num_channels = val.vu32;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
hpe->hp = hp;
@@ -5187,22 +5181,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
-{
- int tc_grp_size, tc_tbl_size;
- u32 max_flow_counter;
-
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
-
- tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
-
- tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
- BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
-
- return tc_tbl_size;
-}
-
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
@@ -5275,10 +5253,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
- attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
attr.default_ft = tc->miss_t;
attr.mapping = chains_mapping;
+ attr.fs_base_prio = MLX5E_TC_PRIO;
tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) {
@@ -5286,12 +5264,12 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
goto err_miss;
}
+ mlx5_chains_print_info(tc->chains);
+
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
- mlx5e_hairpin_params_init(&tc->hairpin_params, dev);
-
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9a458a5d9853..a50bfda18e96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -51,7 +51,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
struct mlx5e_sq_stats *stats = sq->stats;
struct dim_sample dim_sample = {};
- if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_DIM, &sq->state)))
return;
dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
@@ -63,7 +63,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
struct mlx5e_rq_stats *stats = rq->stats;
struct dim_sample dim_sample = {};
- if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_DIM, &rq->state)))
return;
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 75015d370922..7c79476cc5f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -744,7 +744,7 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
u64 value;
int err;
- err = mlx5e_port_max_linkspeed(mdev, &link_speed_max);
+ err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 25a8076a77bf..48036dfddd5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1374,14 +1374,11 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
struct mlx5_flow_table *nf_ft, *ft;
struct mlx5_chains_attr attr = {};
struct mlx5_fs_chains *chains;
- u32 fdb_max;
int err;
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
esw_init_chains_offload_flags(esw, &attr.flags);
attr.ns = MLX5_FLOW_NAMESPACE_FDB;
- attr.max_ft_sz = fdb_max;
+ attr.fs_base_prio = FDB_TC_OFFLOAD;
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
attr.mapping = esw->offloads.reg_c0_obj_pool;
@@ -1392,6 +1389,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
return err;
}
+ mlx5_chains_print_info(chains);
esw->fdb_table.offloads.esw_chains_priv = chains;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 731acbe22dc7..8e3da9d4fe1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -137,7 +137,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
-#define KERNEL_TX_IPSEC_NUM_LEVELS 2
+#define KERNEL_TX_IPSEC_NUM_LEVELS 3
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1
@@ -1762,7 +1762,8 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
if (ignore_level) {
if (ft->type != FS_FT_FDB &&
- ft->type != FS_FT_NIC_RX)
+ ft->type != FS_FT_NIC_RX &&
+ ft->type != FS_FT_NIC_TX)
return false;
if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f9438d4e43ca..016c5f99c470 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -325,6 +325,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
while (sensor_pci_not_working(dev)) {
if (time_after(jiffies, end))
return -ETIMEDOUT;
+ if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+ mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
+ return -ENODEV;
+ }
msleep(100);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 81ed91fee59b..db9df9798ffa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -14,10 +14,8 @@
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
-#define tc_default_ft(chains) ((chains)->tc_default_ft)
-#define tc_end_ft(chains) ((chains)->tc_end_ft)
-#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
- FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
+#define chains_default_ft(chains) ((chains)->chains_default_ft)
+#define chains_end_ft(chains) ((chains)->chains_end_ft)
#define FT_TBL_SZ (64 * 1024)
struct mlx5_fs_chains {
@@ -28,13 +26,15 @@ struct mlx5_fs_chains {
/* Protects above chains_ht and prios_ht */
struct mutex lock;
- struct mlx5_flow_table *tc_default_ft;
- struct mlx5_flow_table *tc_end_ft;
+ struct mlx5_flow_table *chains_default_ft;
+ struct mlx5_flow_table *chains_end_ft;
struct mapping_ctx *chains_mapping;
enum mlx5_flow_namespace_type ns;
u32 group_num;
u32 flags;
+ int fs_base_prio;
+ int fs_base_level;
};
struct fs_chain {
@@ -145,7 +145,7 @@ void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft)
{
- tc_end_ft(chains) = ft;
+ chains_end_ft(chains) = ft;
}
static struct mlx5_flow_table *
@@ -164,11 +164,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
ft_attr.max_fte = sz;
- /* We use tc_default_ft(chains) as the table's next_ft till
+ /* We use chains_default_ft(chains) as the table's next_ft till
* ignore_flow_level is allowed on FT creation and not just for FTEs.
* Instead caller should add an explicit miss rule if needed.
*/
- ft_attr.next_ft = tc_default_ft(chains);
+ ft_attr.next_ft = chains_default_ft(chains);
/* The root table (chain 0, prio 1, level 0) is required to be
* connected to the previous fs_core managed prio.
@@ -177,22 +177,22 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
*/
if (!mlx5_chains_ignore_flow_level_supported(chains) ||
(chain == 0 && prio == 1 && level == 0)) {
- ft_attr.level = level;
- ft_attr.prio = prio - 1;
+ ft_attr.level = chains->fs_base_level;
+ ft_attr.prio = chains->fs_base_prio;
ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
mlx5_get_fdb_sub_ns(chains->dev, chain) :
mlx5_get_flow_namespace(chains->dev, chains->ns);
} else {
ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
- ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
+ ft_attr.prio = chains->fs_base_prio;
/* Firmware doesn't allow us to create another level 0 table,
- * so we create all unmanaged tables as level 1.
+ * so we create all unmanaged tables as level 1 (base + 1).
*
* To connect them, we use explicit miss rules with
* ignore_flow_level. Caller is responsible to create
* these rules (if needed).
*/
- ft_attr.level = 1;
+ ft_attr.level = chains->fs_base_level + 1;
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
@@ -220,7 +220,8 @@ create_chain_restore(struct fs_chain *chain)
int err;
if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
- !mlx5_chains_prios_supported(chains))
+ !mlx5_chains_prios_supported(chains) ||
+ !chains->chains_mapping)
return 0;
err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
@@ -380,7 +381,7 @@ mlx5_chains_add_miss_rule(struct fs_chain *chain,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = next_ft;
- if (next_ft == tc_end_ft(chains) &&
+ if (chains->chains_mapping && next_ft == chains_end_ft(chains) &&
chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
mlx5_chains_prios_supported(chains)) {
act.modify_hdr = chain->miss_modify_hdr;
@@ -494,8 +495,8 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
/* Default miss for each chain: */
next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
- tc_default_ft(chains) :
- tc_end_ft(chains);
+ chains_default_ft(chains) :
+ chains_end_ft(chains);
list_for_each(pos, &chain_s->prios_list) {
struct prio *p = list_entry(pos, struct prio, list);
@@ -681,7 +682,7 @@ err_get_prio:
struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
{
- return tc_end_ft(chains);
+ return chains_end_ft(chains);
}
struct mlx5_flow_table *
@@ -718,48 +719,38 @@ mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
- struct mlx5_fs_chains *chains_priv;
- u32 max_flow_counter;
+ struct mlx5_fs_chains *chains;
int err;
- chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
- if (!chains_priv)
+ chains = kzalloc(sizeof(*chains), GFP_KERNEL);
+ if (!chains)
return ERR_PTR(-ENOMEM);
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
-
- mlx5_core_dbg(dev,
- "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, attr->max_grp_num, attr->max_ft_sz);
-
- chains_priv->dev = dev;
- chains_priv->flags = attr->flags;
- chains_priv->ns = attr->ns;
- chains_priv->group_num = attr->max_grp_num;
- chains_priv->chains_mapping = attr->mapping;
- tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
+ chains->dev = dev;
+ chains->flags = attr->flags;
+ chains->ns = attr->ns;
+ chains->group_num = attr->max_grp_num;
+ chains->chains_mapping = attr->mapping;
+ chains->fs_base_prio = attr->fs_base_prio;
+ chains->fs_base_level = attr->fs_base_level;
+ chains_default_ft(chains) = chains_end_ft(chains) = attr->default_ft;
- mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
- mlx5_chains_get_chain_range(chains_priv),
- mlx5_chains_get_prio_range(chains_priv));
-
- err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
+ err = rhashtable_init(&chains_ht(chains), &chain_params);
if (err)
goto init_chains_ht_err;
- err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
+ err = rhashtable_init(&prios_ht(chains), &prio_params);
if (err)
goto init_prios_ht_err;
- mutex_init(&chains_lock(chains_priv));
+ mutex_init(&chains_lock(chains));
- return chains_priv;
+ return chains;
init_prios_ht_err:
- rhashtable_destroy(&chains_ht(chains_priv));
+ rhashtable_destroy(&chains_ht(chains));
init_chains_ht_err:
- kfree(chains_priv);
+ kfree(chains);
return ERR_PTR(err);
}
@@ -808,3 +799,9 @@ mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
return mapping_remove(ctx, chain_mapping);
}
+
+void
+mlx5_chains_print_info(struct mlx5_fs_chains *chains)
+{
+ mlx5_core_dbg(chains->dev, "Flow table chains groups(%d)\n", chains->group_num);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
index d50bdb226cef..8972fe05723a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -17,8 +17,9 @@ enum mlx5_chains_flags {
struct mlx5_chains_attr {
enum mlx5_flow_namespace_type ns;
+ int fs_base_prio;
+ int fs_base_level;
u32 flags;
- u32 max_ft_sz;
u32 max_grp_num;
struct mlx5_flow_table *default_ft;
struct mapping_ctx *mapping;
@@ -68,6 +69,8 @@ void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft);
+void
+mlx5_chains_print_info(struct mlx5_fs_chains *chains);
#else /* CONFIG_MLX5_CLS_ACT */
@@ -89,7 +92,9 @@ static inline struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{ return NULL; }
static inline void
-mlx5_chains_destroy(struct mlx5_fs_chains *chains) {};
+mlx5_chains_destroy(struct mlx5_fs_chains *chains) {}
+static inline void
+mlx5_chains_print_info(struct mlx5_fs_chains *chains) {}
#endif /* CONFIG_MLX5_CLS_ACT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f1de152a6113..597174ceadc9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -52,6 +52,7 @@
#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
+#include "thermal.h"
#include "lib/eq.h"
#include "fs_core.h"
#include "lib/mpfs.h"
@@ -191,7 +192,7 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
if (!(fw_initializing >> 31))
break;
if (time_after(jiffies, end) ||
- test_and_clear_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+ test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
err = -EBUSY;
break;
}
@@ -917,7 +918,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
return 0;
err_clr_master:
- pci_clear_master(dev->pdev);
release_bar(dev->pdev);
err_disable:
mlx5_pci_disable_device(dev);
@@ -932,7 +932,6 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
*/
mlx5_drain_health_wq(dev);
iounmap(dev->iseg);
- pci_clear_master(dev->pdev);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
}
@@ -1768,6 +1767,10 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
+ err = mlx5_thermal_init(dev);
+ if (err)
+ dev_err(&pdev->dev, "mlx5_thermal_init failed with error code %d\n", err);
+
pci_save_state(pdev);
devlink_register(devlink);
return 0;
@@ -1796,6 +1799,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_drain_fw_reset(dev);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev);
+ mlx5_thermal_uninit(dev);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index a1548e6bfb35..0daeb4b72cca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1054,3 +1054,154 @@ out:
kfree(out);
return err;
}
+
+/* speed in units of 1Mb */
+static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = 1000,
+ [MLX5E_1000BASE_KX] = 1000,
+ [MLX5E_10GBASE_CX4] = 10000,
+ [MLX5E_10GBASE_KX4] = 10000,
+ [MLX5E_10GBASE_KR] = 10000,
+ [MLX5E_20GBASE_KR2] = 20000,
+ [MLX5E_40GBASE_CR4] = 40000,
+ [MLX5E_40GBASE_KR4] = 40000,
+ [MLX5E_56GBASE_R4] = 56000,
+ [MLX5E_10GBASE_CR] = 10000,
+ [MLX5E_10GBASE_SR] = 10000,
+ [MLX5E_10GBASE_ER] = 10000,
+ [MLX5E_40GBASE_SR4] = 40000,
+ [MLX5E_40GBASE_LR4] = 40000,
+ [MLX5E_50GBASE_SR2] = 50000,
+ [MLX5E_100GBASE_CR4] = 100000,
+ [MLX5E_100GBASE_SR4] = 100000,
+ [MLX5E_100GBASE_KR4] = 100000,
+ [MLX5E_100GBASE_LR4] = 100000,
+ [MLX5E_100BASE_TX] = 100,
+ [MLX5E_1000BASE_T] = 1000,
+ [MLX5E_10GBASE_T] = 10000,
+ [MLX5E_25GBASE_CR] = 25000,
+ [MLX5E_25GBASE_KR] = 25000,
+ [MLX5E_25GBASE_SR] = 25000,
+ [MLX5E_50GBASE_CR2] = 50000,
+ [MLX5E_50GBASE_KR2] = 50000,
+};
+
+static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = 100,
+ [MLX5E_1000BASE_X_SGMII] = 1000,
+ [MLX5E_5GBASE_R] = 5000,
+ [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
+ [MLX5E_400GAUI_8] = 400000,
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
+};
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ if (!eproto)
+ return -EINVAL;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
+ if (err)
+ return err;
+
+ eproto->cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+ eth_proto_capability);
+ eproto->admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin);
+ eproto->oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
+ return 0;
+}
+
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_port_eth_proto eproto;
+ int err;
+
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+ return true;
+
+ err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+ if (err)
+ return false;
+
+ return !!eproto.cap;
+}
+
+static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
+ const u32 **arr, u32 *size,
+ bool force_legacy)
+{
+ bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev);
+
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
+ ARRAY_SIZE(mlx5e_link_speed);
+ *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+}
+
+u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+ bool force_legacy)
+{
+ unsigned long temp = eth_proto_oper;
+ const u32 *table;
+ u32 speed = 0;
+ u32 max_size;
+ int i;
+
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ i = find_first_bit(&temp, max_size);
+ if (i < max_size)
+ speed = table[i];
+ return speed;
+}
+
+u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+ bool force_legacy)
+{
+ u32 link_modes = 0;
+ const u32 *table;
+ u32 max_size;
+ int i;
+
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ for (i = 0; i < max_size; ++i) {
+ if (table[i] == speed)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
+ return link_modes;
+}
+
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ struct mlx5_port_eth_proto eproto;
+ u32 max_speed = 0;
+ const u32 *table;
+ u32 max_size;
+ bool ext;
+ int err;
+ int i;
+
+ ext = mlx5_ptys_ext_supported(mdev);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
+ if (err)
+ return err;
+
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ for (i = 0; i < max_size; ++i)
+ if (eproto.cap & MLX5E_PROT_MASK(i))
+ max_speed = max(max_speed, table[i]);
+
+ *speed = max_speed;
+ return 0;
+}
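These PTYS helpers move from en/port.c into the core (with the mlx5e_ prefix dropped from the exported names) so non-ethdev callers can share them, as the esw/qos.c hunk above already does. A hedged usage sketch, assuming eth_proto_oper was read from the PTYS register:

	u32 speed, modes, max;

	speed = mlx5_port_ptys2speed(mdev, eth_proto_oper, false);	/* in Mb/s */
	modes = mlx5_port_speed2linkmodes(mdev, 25000, false);		/* all 25G modes */
	if (!mlx5_port_max_linkspeed(mdev, &max))
		mlx5_core_dbg(mdev, "oper %u Mb/s, max %u Mb/s, 25G mask 0x%x\n",
			      speed, max, modes);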
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c
new file mode 100644
index 000000000000..e47fa6fb836f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "thermal.h"
+
+#define MLX5_THERMAL_POLL_INT_MSEC 1000
+#define MLX5_THERMAL_NUM_TRIPS 0
+#define MLX5_THERMAL_ASIC_SENSOR_INDEX 0
+
+/* Bit string indicating the writability of trip points if any */
+#define MLX5_THERMAL_TRIP_MASK (BIT(MLX5_THERMAL_NUM_TRIPS) - 1)
+
+struct mlx5_thermal {
+ struct mlx5_core_dev *mdev;
+ struct thermal_zone_device *tzdev;
+};
+
+static int mlx5_thermal_get_mtmp_temp(struct mlx5_core_dev *mdev, u32 id, int *p_temp)
+{
+ u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+ u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+ int err;
+
+ MLX5_SET(mtmp_reg, mtmp_in, sensor_index, id);
+
+ err = mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
+ mtmp_out, sizeof(mtmp_out),
+ MLX5_REG_MTMP, 0, 0);
+
+ if (err)
+ return err;
+
+ *p_temp = MLX5_GET(mtmp_reg, mtmp_out, temperature);
+
+ return 0;
+}
+
+static int mlx5_thermal_get_temp(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlx5_thermal *thermal = tzdev->devdata;
+ struct mlx5_core_dev *mdev = thermal->mdev;
+ int err;
+
+ err = mlx5_thermal_get_mtmp_temp(mdev, MLX5_THERMAL_ASIC_SENSOR_INDEX, p_temp);
+
+ if (err)
+ return err;
+
+ /* The unit of temp returned is in 0.125 C. The thermal
+ * framework expects the value in 0.001 C.
+ */
+ *p_temp *= 125;
+
+ return 0;
+}
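For reference: a raw MTMP reading of 200 is 200 x 0.125 = 25 degC, reported to the thermal core as 200 * 125 = 25000 millidegrees.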
+
+static struct thermal_zone_device_ops mlx5_thermal_ops = {
+ .get_temp = mlx5_thermal_get_temp,
+};
+
+int mlx5_thermal_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_thermal *thermal;
+ struct thermal_zone_device *tzd;
+ const char *data = "mlx5";
+
+ tzd = thermal_zone_get_zone_by_name(data);
+ if (!IS_ERR(tzd))
+ return 0;
+
+ thermal = kzalloc(sizeof(*thermal), GFP_KERNEL);
+ if (!thermal)
+ return -ENOMEM;
+
+ thermal->mdev = mdev;
+ thermal->tzdev = thermal_zone_device_register(data,
+ MLX5_THERMAL_NUM_TRIPS,
+ MLX5_THERMAL_TRIP_MASK,
+ thermal,
+ &mlx5_thermal_ops,
+ NULL, 0, MLX5_THERMAL_POLL_INT_MSEC);
+ if (IS_ERR(thermal->tzdev)) {
+ dev_err(mdev->device, "Failed to register thermal zone device (%s) %ld\n",
+ data, PTR_ERR(thermal->tzdev));
+ kfree(thermal);
+ return -EINVAL;
+ }
+
+ mdev->thermal = thermal;
+ return 0;
+}
+
+void mlx5_thermal_uninit(struct mlx5_core_dev *mdev)
+{
+ if (!mdev->thermal)
+ return;
+
+ thermal_zone_device_unregister(mdev->thermal->tzdev);
+ kfree(mdev->thermal);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.h b/drivers/net/ethernet/mellanox/mlx5/core/thermal.h
new file mode 100644
index 000000000000..7d752c122192
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/thermal.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+ */
+#ifndef __MLX5_THERMAL_DRIVER_H
+#define __MLX5_THERMAL_DRIVER_H
+
+#if IS_ENABLED(CONFIG_THERMAL)
+int mlx5_thermal_init(struct mlx5_core_dev *mdev);
+void mlx5_thermal_uninit(struct mlx5_core_dev *mdev);
+#else
+static inline int mlx5_thermal_init(struct mlx5_core_dev *mdev)
+{
+ mdev->thermal = NULL;
+ return 0;
+}
+
+static inline void mlx5_thermal_uninit(struct mlx5_core_dev *mdev) { }
+#endif
+
+#endif /* __MLX5_THERMAL_DRIVER_H */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 7e0871b631e4..957d96a91a8a 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1466,7 +1466,6 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
phy_stop(netdev->phydev);
phy_disconnect(netdev->phydev);
- netdev->phydev = NULL;
}
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index 8bcd60f17d6d..571e6d4da1e9 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -6,7 +6,6 @@ config LAN966X_SWITCH
depends on NET_SWITCHDEV
depends on BRIDGE || BRIDGE=n
select PHYLINK
- select PACKING
select PAGE_POOL
select VCAP
help
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 55b484b10562..2ed76bb61a73 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -517,7 +517,7 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
skb_trim(skb, skb->len - ETH_FCS_LEN);
- lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
+ lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
skb->protocol = eth_type_trans(skb, skb->dev);
if (lan966x->bridge_mask & BIT(src_port)) {
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 685e8cd7658c..9be6462f1cc5 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -7,7 +7,6 @@
#include <linux/ip.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
-#include <linux/packing.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
#include <net/addrconf.h>
@@ -305,46 +304,57 @@ err:
return NETDEV_TX_BUSY;
}
+static void lan966x_ifh_set(u8 *ifh, size_t val, size_t pos, size_t length)
+{
+ int i = 0;
+
+ do {
+ u8 p = IFH_LEN_BYTES - (pos + i) / 8 - 1;
+ u8 v = val >> i & 0xff;
+
+ /* There is no need to check for limits of the array, as these
+ * will never be written
+ */
+ ifh[p] |= v << ((pos + i) % 8);
+ ifh[p - 1] |= v >> (8 - (pos + i) % 8);
+
+ i += 8;
+ } while (i < length);
+}
+
void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{
- packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
- IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
+ lan966x_ifh_set(ifh, bypass, IFH_POS_BYPASS, IFH_WID_BYPASS);
}
-void lan966x_ifh_set_port(void *ifh, u64 bypass)
+void lan966x_ifh_set_port(void *ifh, u64 port)
{
- packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
- IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
+ lan966x_ifh_set(ifh, port, IFH_POS_DSTS, IFH_WID_DSTS);
}
-static void lan966x_ifh_set_qos_class(void *ifh, u64 bypass)
+static void lan966x_ifh_set_qos_class(void *ifh, u64 qos)
{
- packing(ifh, &bypass, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
- IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
+ lan966x_ifh_set(ifh, qos, IFH_POS_QOS_CLASS, IFH_WID_QOS_CLASS);
}
-static void lan966x_ifh_set_ipv(void *ifh, u64 bypass)
+static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
{
- packing(ifh, &bypass, IFH_POS_IPV + IFH_WID_IPV - 1,
- IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
+ lan966x_ifh_set(ifh, ipv, IFH_POS_IPV, IFH_WID_IPV);
}
static void lan966x_ifh_set_vid(void *ifh, u64 vid)
{
- packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
- IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
+ lan966x_ifh_set(ifh, vid, IFH_POS_TCI, IFH_WID_TCI);
}
static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
{
- packing(ifh, &rew_op, IFH_POS_REW_CMD + IFH_WID_REW_CMD - 1,
- IFH_POS_REW_CMD, IFH_LEN * 4, PACK, 0);
+ lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
}
static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
{
- packing(ifh, &timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
- IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0);
+ lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
}
static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
@@ -582,22 +592,38 @@ static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
}
}
+/* Extract a @length bits wide field starting at bit offset @pos in the IFH */
+static u64 lan966x_ifh_get(u8 *ifh, size_t pos, size_t length)
+{
+ u64 val = 0;
+ u8 v;
+
+	for (int i = 0; i < length; i++) {
+ int j = pos + i;
+ int k = j % 8;
+
+		/* Load a fresh header byte on the first iteration and
+		 * whenever the bit index wraps into the next byte
+		 */
+		if (i == 0 || k == 0)
+			v = ifh[IFH_LEN_BYTES - (j / 8) - 1];
+
+ if (v & (1 << k))
+			val |= (1ULL << i);
+ }
+
+ return val;
+}
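The matching getter tests the field bit by bit against the same layout. A round-trip sketch (user-space C, same illustrative constants as the setter sketch above):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DEMO_IFH_LEN_BYTES 28	/* illustrative stand-in for IFH_LEN_BYTES */

/* Condensed version of the extraction loop above: bit j of the header lives
 * at bit j % 8 of byte DEMO_IFH_LEN_BYTES - j / 8 - 1. */
static uint64_t demo_ifh_get(const uint8_t *ifh, size_t pos, size_t length)
{
	uint64_t val = 0;

	for (size_t i = 0; i < length; i++) {
		size_t j = pos + i;

		if (ifh[DEMO_IFH_LEN_BYTES - j / 8 - 1] & (1u << (j % 8)))
			val |= 1ULL << i;
	}
	return val;
}

int main(void)
{
	uint8_t ifh[DEMO_IFH_LEN_BYTES] = { 0 };

	ifh[25] = 0xe0;	/* the bytes the setter sketch produced for a */
	ifh[24] = 0x55;	/* 12-bit value of 0xabc at bit offset 19     */

	printf("field = 0x%llx\n",	/* prints field = 0xabc */
	       (unsigned long long)demo_ifh_get(ifh, 19, 12));
	return 0;
}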
+
void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
{
- packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
- IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
+ *src_port = lan966x_ifh_get(ifh, IFH_POS_SRCPORT, IFH_WID_SRCPORT);
}
static void lan966x_ifh_get_len(void *ifh, u64 *len)
{
- packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1,
- IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
+ *len = lan966x_ifh_get(ifh, IFH_POS_LEN, IFH_WID_LEN);
}
void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp)
{
- packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1,
- IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0);
+ *timestamp = lan966x_ifh_get(ifh, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
}
static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
@@ -668,7 +694,7 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
*buf = val;
}
- lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
+ lan966x_ptp_rxtstamp(lan966x, skb, src_port, timestamp);
skb->protocol = eth_type_trans(skb, dev);
if (lan966x->bridge_mask & BIT(src_port)) {
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 49f5159afbf3..851afb0166b1 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -92,6 +92,11 @@
#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */
#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */
+#define LAN966X_VCAP_CID_IS1_L0 VCAP_CID_INGRESS_L0 /* IS1 lookup 0 */
+#define LAN966X_VCAP_CID_IS1_L1 VCAP_CID_INGRESS_L1 /* IS1 lookup 1 */
+#define LAN966X_VCAP_CID_IS1_L2 VCAP_CID_INGRESS_L2 /* IS1 lookup 2 */
+#define LAN966X_VCAP_CID_IS1_MAX (VCAP_CID_INGRESS_L3 - 1) /* IS1 Max */
+
#define LAN966X_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
#define LAN966X_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
#define LAN966X_VCAP_CID_IS2_MAX (VCAP_CID_INGRESS_STAGE2_L2 - 1) /* IS2 Max */
@@ -139,6 +144,39 @@ enum vcap_is2_port_sel_ipv6 {
VCAP_IS2_PS_IPV6_MAC_ETYPE,
};
+enum vcap_is1_port_sel_other {
+ VCAP_IS1_PS_OTHER_NORMAL,
+ VCAP_IS1_PS_OTHER_7TUPLE,
+ VCAP_IS1_PS_OTHER_DBL_VID,
+ VCAP_IS1_PS_OTHER_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_ipv4 {
+ VCAP_IS1_PS_IPV4_NORMAL,
+ VCAP_IS1_PS_IPV4_7TUPLE,
+ VCAP_IS1_PS_IPV4_5TUPLE_IP4,
+ VCAP_IS1_PS_IPV4_DBL_VID,
+ VCAP_IS1_PS_IPV4_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_ipv6 {
+ VCAP_IS1_PS_IPV6_NORMAL,
+ VCAP_IS1_PS_IPV6_7TUPLE,
+ VCAP_IS1_PS_IPV6_5TUPLE_IP4,
+ VCAP_IS1_PS_IPV6_NORMAL_IP6,
+ VCAP_IS1_PS_IPV6_5TUPLE_IP6,
+ VCAP_IS1_PS_IPV6_DBL_VID,
+ VCAP_IS1_PS_IPV6_DMAC_VID,
+};
+
+enum vcap_is1_port_sel_rt {
+ VCAP_IS1_PS_RT_NORMAL,
+ VCAP_IS1_PS_RT_7TUPLE,
+ VCAP_IS1_PS_RT_DBL_VID,
+ VCAP_IS1_PS_RT_DMAC_VID,
+ VCAP_IS1_PS_RT_FOLLOW_OTHER = 7,
+};
+
struct lan966x_port;
struct lan966x_db {
@@ -369,7 +407,8 @@ struct lan966x_port {
struct phy *serdes;
struct fwnode_handle *fwnode;
- u8 ptp_cmd;
+ u8 ptp_tx_cmd;
+ bool ptp_rx_cmd;
u16 ts_id;
struct sk_buff_head tx_skbs;
@@ -489,7 +528,7 @@ void lan966x_ptp_deinit(struct lan966x *lan966x);
int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr);
int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr);
void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
- u64 timestamp);
+ u64 src_port, u64 timestamp);
int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
struct sk_buff *skb);
void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
index 7d66fe75cd3b..7302df2300fd 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -49,8 +49,7 @@ static int lan966x_police_add(struct lan966x_port *port,
return 0;
}
-static int lan966x_police_del(struct lan966x_port *port,
- u16 pol_idx)
+static void lan966x_police_del(struct lan966x_port *port, u16 pol_idx)
{
struct lan966x *lan966x = port->lan966x;
@@ -67,8 +66,6 @@ static int lan966x_police_del(struct lan966x_port *port,
lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) |
ANA_POL_PIR_CFG_PIR_BURST_SET(0),
lan966x, ANA_POL_PIR_CFG(pol_idx));
-
- return 0;
}
static int lan966x_police_validate(struct lan966x_port *port,
@@ -186,7 +183,6 @@ int lan966x_police_port_del(struct lan966x_port *port,
struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = port->lan966x;
- int err;
if (port->tc.police_id != police_id) {
NL_SET_ERR_MSG_MOD(extack,
@@ -194,12 +190,7 @@ int lan966x_police_port_del(struct lan966x_port *port,
return -EINVAL;
}
- err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to add policer to port");
- return err;
- }
+ lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) |
ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 931e37b9a0ad..266a21a2d124 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -272,13 +272,13 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
- port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ port->ptp_tx_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- port->ptp_cmd = IFH_REW_OP_ONE_STEP_PTP;
+ port->ptp_tx_cmd = IFH_REW_OP_ONE_STEP_PTP;
break;
case HWTSTAMP_TX_OFF:
- port->ptp_cmd = IFH_REW_OP_NOOP;
+ port->ptp_tx_cmd = IFH_REW_OP_NOOP;
break;
default:
return -ERANGE;
@@ -286,6 +286,7 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
+ port->ptp_rx_cmd = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -301,6 +302,7 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
+ port->ptp_rx_cmd = true;
cfg.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
@@ -332,7 +334,7 @@ static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
u8 msgtype;
int type;
- if (port->ptp_cmd == IFH_REW_OP_NOOP)
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
return IFH_REW_OP_NOOP;
type = ptp_classify_raw(skb);
@@ -343,7 +345,7 @@ static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
if (!header)
return IFH_REW_OP_NOOP;
- if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
return IFH_REW_OP_TWO_STEP_PTP;
/* If it is sync and run 1 step then set the correct operation,
@@ -1009,9 +1011,6 @@ static int lan966x_ptp_phc_init(struct lan966x *lan966x,
phc->index = index;
phc->lan966x = lan966x;
- /* PTP Rx stamping is always enabled. */
- phc->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-
return 0;
}
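With this change Rx timestamping is armed per port through the hwtstamp ioctl path above instead of being forced on at PHC init. A user-space sketch of the request an application would issue (standard SIOCSHWTSTAMP usage from linux/net_tstamp.h; the interface name is illustrative):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ONESTEP_SYNC,	   /* -> IFH_REW_OP_ONE_STEP_PTP */
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT, /* -> ptp_rx_cmd = true */
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative ifname */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* the driver coerces any event filter to HWTSTAMP_FILTER_ALL */
		printf("rx_filter granted: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}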
@@ -1088,14 +1087,15 @@ void lan966x_ptp_deinit(struct lan966x *lan966x)
}
void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
- u64 timestamp)
+ u64 src_port, u64 timestamp)
{
struct skb_shared_hwtstamps *shhwtstamps;
struct lan966x_phc *phc;
struct timespec64 ts;
u64 full_ts_in_ns;
- if (!lan966x->ptp)
+ if (!lan966x->ptp ||
+ !lan966x->ports[src_port]->ptp_rx_cmd)
return;
phc = &lan966x->phc[LAN966X_PHC_PORT];
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 9767b5a1c958..f99f88b5caa8 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -316,6 +316,42 @@ enum lan966x_target {
#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+/* ANA:PORT:VCAP_CFG */
+#define ANA_VCAP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 12, 0, 1, 4)
+
+#define ANA_VCAP_CFG_S1_ENA BIT(14)
+#define ANA_VCAP_CFG_S1_ENA_SET(x)\
+ FIELD_PREP(ANA_VCAP_CFG_S1_ENA, x)
+#define ANA_VCAP_CFG_S1_ENA_GET(x)\
+ FIELD_GET(ANA_VCAP_CFG_S1_ENA, x)
+
+/* ANA:PORT:VCAP_S1_KEY_CFG */
+#define ANA_VCAP_S1_CFG(g, r) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 16, r, 3, 4)
+
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG GENMASK(11, 9)
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_RT_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_RT_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_RT_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG GENMASK(8, 6)
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_IP6_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_IP6_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG GENMASK(5, 3)
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_IP4_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_IP4_CFG, x)
+
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG GENMASK(2, 0)
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG_SET(x)\
+ FIELD_PREP(ANA_VCAP_S1_CFG_KEY_OTHER_CFG, x)
+#define ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(x)\
+ FIELD_GET(ANA_VCAP_S1_CFG_KEY_OTHER_CFG, x)
+
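The *_SET/*_GET helpers above are thin wrappers around FIELD_PREP()/FIELD_GET() over GENMASK() ranges. A stand-alone sketch of how they compose one per-port key-selection value (user-space C with simplified 32-bit stand-ins for the kernel macros; the selector values 2 and 1 match VCAP_IS1_PS_IPV4_5TUPLE_IP4 and VCAP_IS1_PS_OTHER_7TUPLE from the lan966x_main.h hunk above):

#include <stdio.h>
#include <stdint.h>

/* Simplified 32-bit stand-ins for the kernel's GENMASK/FIELD_PREP/FIELD_GET
 * (__builtin_ctz is the GCC/Clang count-trailing-zeros builtin). */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define DEMO_KEY_IP4_CFG	GENMASK(5, 3)	/* as ANA_VCAP_S1_CFG_KEY_IP4_CFG */
#define DEMO_KEY_OTHER_CFG	GENMASK(2, 0)	/* as ANA_VCAP_S1_CFG_KEY_OTHER_CFG */

int main(void)
{
	uint32_t reg = FIELD_PREP(DEMO_KEY_IP4_CFG, 2) |	/* 5tuple_ip4 */
		       FIELD_PREP(DEMO_KEY_OTHER_CFG, 1);	/* 7tuple */

	/* Prints: reg = 0x11, ip4 sel = 2, other sel = 1 */
	printf("reg = 0x%02x, ip4 sel = %u, other sel = %u\n", reg,
	       FIELD_GET(DEMO_KEY_IP4_CFG, reg),
	       FIELD_GET(DEMO_KEY_OTHER_CFG, reg));
	return 0;
}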
/* ANA:PORT:VCAP_S2_CFG */
#define ANA_VCAP_S2_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 28, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index f960727ecaee..47b2f7579dd2 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -5,14 +5,34 @@
#include "vcap_api_client.h"
#include "vcap_tc.h"
-static bool lan966x_tc_is_known_etype(u16 etype)
+static bool lan966x_tc_is_known_etype(struct vcap_tc_flower_parse_usage *st,
+ u16 etype)
{
- switch (etype) {
- case ETH_P_ALL:
- case ETH_P_ARP:
- case ETH_P_IP:
- case ETH_P_IPV6:
- return true;
+ switch (st->admin->vtype) {
+ case VCAP_TYPE_IS1:
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ return true;
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ case ETH_P_SNAP:
+ case ETH_P_802_2:
+ return true;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "VCAP type not supported");
+ return false;
}
return false;
@@ -69,7 +89,7 @@ lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
flow_rule_match_basic(st->frule, &match);
if (match.mask->n_proto) {
st->l3_proto = be16_to_cpu(match.key->n_proto);
- if (!lan966x_tc_is_known_etype(st->l3_proto)) {
+ if (!lan966x_tc_is_known_etype(st, st->l3_proto)) {
err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
st->l3_proto, ~0);
if (err)
@@ -79,18 +99,61 @@ lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
VCAP_BIT_1);
if (err)
goto out;
+ } else if (st->l3_proto == ETH_P_IPV6 &&
+ st->admin->vtype == VCAP_TYPE_IS1) {
+ /* Don't set any keys in this case */
+ } else if (st->l3_proto == ETH_P_SNAP &&
+ st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_ETYPE_LEN_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
}
}
if (match.mask->ip_proto) {
st->l4_proto = match.key->ip_proto;
if (st->l4_proto == IPPROTO_TCP) {
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
err = vcap_rule_add_key_bit(st->vrule,
VCAP_KF_TCP_IS,
VCAP_BIT_1);
if (err)
goto out;
} else if (st->l4_proto == IPPROTO_UDP) {
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+
err = vcap_rule_add_key_bit(st->vrule,
VCAP_KF_TCP_IS,
VCAP_BIT_0);
@@ -113,11 +176,29 @@ out:
}
static int
+lan966x_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ if (st->admin->vtype != VCAP_TYPE_IS1) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "cvlan not supported in this VCAP");
+ return -EINVAL;
+ }
+
+ return vcap_tc_flower_handler_cvlan_usage(st);
+}
+
+static int
lan966x_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
{
- return vcap_tc_flower_handler_vlan_usage(st,
- VCAP_KF_8021Q_VID_CLS,
- VCAP_KF_8021Q_PCP_CLS);
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+
+ if (st->admin->vtype == VCAP_TYPE_IS1) {
+ vid_key = VCAP_KF_8021Q_VID0;
+ pcp_key = VCAP_KF_8021Q_PCP0;
+ }
+
+ return vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
}
static int
@@ -128,6 +209,7 @@ static int
[FLOW_DISSECTOR_KEY_CONTROL] = lan966x_tc_flower_handler_control_usage,
[FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
[FLOW_DISSECTOR_KEY_BASIC] = lan966x_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_CVLAN] = lan966x_tc_flower_handler_cvlan_usage,
[FLOW_DISSECTOR_KEY_VLAN] = lan966x_tc_flower_handler_vlan_usage,
[FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
[FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
@@ -143,6 +225,7 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
.fco = f,
.vrule = vrule,
.l3_proto = ETH_P_ALL,
+ .admin = admin,
};
int err = 0;
@@ -221,6 +304,100 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
return 0;
}
+/* Add the actionset that is the default for the VCAP type */
+static int lan966x_tc_set_actionset(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ enum vcap_actionfield_set aset;
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ aset = VCAP_AFS_S1;
+ break;
+ case VCAP_TYPE_IS2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Do not overwrite any current actionset */
+ if (vrule->actionset == VCAP_AFS_NO_VALUE)
+ err = vcap_set_rule_set_actionset(vrule, aset);
+
+ return err;
+}
+
+static int lan966x_tc_add_rule_link_target(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int target_cid)
+{
+ int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
+ int err;
+
+ if (!link_val)
+ return 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ /* Choose IS1 specific NXT_IDX key (for chaining rules from IS1) */
+ err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ 1, ~0);
+ if (err)
+ return err;
+
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
+ link_val, ~0);
+ case VCAP_TYPE_IS2:
+ /* Add IS2 specific PAG key (for chaining rules from IS1) */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
+ link_val, ~0);
+ default:
+ break;
+ }
+ return 0;
+}
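The link value keyed on above is simply the target chain id modulo the per-lookup chain block, so a target sitting at a lookup base chain needs no link key at all (the !link_val early return). A small sketch of the arithmetic (user-space C; the VCAP_CID_LOOKUP_SIZE value and the example chain ids are assumed from the vcap API headers and are illustrative):

#include <stdio.h>

#define VCAP_CID_LOOKUP_SIZE 100000	/* assumed from the vcap API headers */

int main(void)
{
	/* Assumed layout: 1000000 = IS1 lookup 0 base, 1100000 = IS1
	 * lookup 1 base, 8000000 = IS2 lookup 0 base. */
	int targets[] = { 1000000, 1100000, 1100042, 8000000 };

	for (unsigned int i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		int link_val = targets[i] % VCAP_CID_LOOKUP_SIZE;

		printf("target chain %7d -> link value %5d%s\n", targets[i],
		       link_val, link_val ? "" : " (no link key needed)");
	}
	return 0;
}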
+
+static int lan966x_tc_add_rule_link(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *f,
+ int to_cid)
+{
+ struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
+ int diff, err = 0;
+
+ if (!to_admin) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unknown destination chain");
+ return -EINVAL;
+ }
+
+ diff = vcap_chain_offset(vctrl, f->common.chain_index, to_cid);
+ if (!diff)
+ return 0;
+
+ /* Between IS1 and IS2 the PAG value is used */
+ if (admin->vtype == VCAP_TYPE_IS1 && to_admin->vtype == VCAP_TYPE_IS2) {
+ /* This works for IS1->IS2 */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_OVERRIDE_MASK,
+ 0xff);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported chain destination");
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static int lan966x_tc_flower_add(struct lan966x_port *port,
struct flow_cls_offload *f,
struct vcap_admin *admin,
@@ -248,11 +425,23 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
if (err)
goto out;
+ err = lan966x_tc_add_rule_link_target(admin, vrule,
+ f->common.chain_index);
+ if (err)
+ goto out;
+
frule = flow_cls_offload_flow_rule(f);
flow_action_for_each(idx, act, &frule->action) {
switch (act->id) {
case FLOW_ACTION_TRAP:
+ if (admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Trap action not supported in this VCAP");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
err = vcap_rule_add_action_bit(vrule,
VCAP_AF_CPU_COPY_ENA,
VCAP_BIT_1);
@@ -266,6 +455,16 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
break;
case FLOW_ACTION_GOTO:
+ err = lan966x_tc_set_actionset(admin, vrule);
+ if (err)
+ goto out;
+
+ err = lan966x_tc_add_rule_link(port->lan966x->vcap_ctrl,
+ admin, vrule,
+ f, act->chain_index);
+ if (err)
+ goto out;
+
break;
default:
NL_SET_ERR_MSG_MOD(f->common.extack,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
index 928e711960e6..66400a082d02 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c
@@ -6,6 +6,965 @@
#include "lan966x_vcap_ag_api.h"
/* keyfields */
+static const struct vcap_field is1_normal_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 35,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 100,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 101,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 104,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 105,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 32,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 143,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 144,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 145,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 161,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is1_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 31,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 32,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 35,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 36,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 48,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 49,
+ .width = 3,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 52,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 54,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 56,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 94,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 134,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 135,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 136,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_normal_ip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 53,
+ .width = 48,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 101,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 107,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 235,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 243,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 244,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_S1_IP6] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 252,
+ .width = 112,
+ },
+};
+
+static const struct vcap_field is1_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 53,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 101,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 150,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 166,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 167,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 168,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 169,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 170,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP_MSB] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 16,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 193,
+ .width = 64,
+ },
+ [VCAP_KF_L3_IP6_SIP_MSB] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 257,
+ .width = 16,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 273,
+ .width = 64,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 337,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 338,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 339,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 355,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is1_5tuple_ip6_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 53,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 59,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 187,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 315,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 323,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 324,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 332,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_dbl_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 17,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 32,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 33,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 36,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 37,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 3,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 53,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 54,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 70,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 71,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 72,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAG_OFS_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 6,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 82,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field is1_rt_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 6,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 7,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 9,
+ .width = 48,
+ },
+ [VCAP_KF_RT_VLAN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 57,
+ .width = 3,
+ },
+ [VCAP_KF_RT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 60,
+ .width = 2,
+ },
+ [VCAP_KF_RT_FRMID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 32,
+ },
+};
+
+static const struct vcap_field is1_dmac_vid_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_INDEX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 9,
+ },
+ [VCAP_KF_8021CB_R_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 33,
+ .width = 48,
+ },
+};
+
static const struct vcap_field is2_mac_etype_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_U32,
@@ -1163,6 +2122,49 @@ static const struct vcap_field is2_smac_sip6_keyfield[] = {
};
/* keyfield_set */
+static const struct vcap_set is1_keyfield_set[] = {
+ [VCAP_KFS_NORMAL] = {
+ .type_id = 0,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_5TUPLE_IP4] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_NORMAL_IP6] = {
+ .type_id = 0,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_7TUPLE] = {
+ .type_id = 1,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_5TUPLE_IP6] = {
+ .type_id = 2,
+ .sw_per_item = 4,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_DBL_VID] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_RT] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+ [VCAP_KFS_DMAC_VID] = {
+ .type_id = 2,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
static const struct vcap_set is2_keyfield_set[] = {
[VCAP_KFS_MAC_ETYPE] = {
.type_id = 0,
@@ -1227,6 +2229,17 @@ static const struct vcap_set is2_keyfield_set[] = {
};
/* keyfield_set map */
+static const struct vcap_field *is1_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL] = is1_normal_keyfield,
+ [VCAP_KFS_5TUPLE_IP4] = is1_5tuple_ip4_keyfield,
+ [VCAP_KFS_NORMAL_IP6] = is1_normal_ip6_keyfield,
+ [VCAP_KFS_7TUPLE] = is1_7tuple_keyfield,
+ [VCAP_KFS_5TUPLE_IP6] = is1_5tuple_ip6_keyfield,
+ [VCAP_KFS_DBL_VID] = is1_dbl_vid_keyfield,
+ [VCAP_KFS_RT] = is1_rt_keyfield,
+ [VCAP_KFS_DMAC_VID] = is1_dmac_vid_keyfield,
+};
+
static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
[VCAP_KFS_MAC_LLC] = is2_mac_llc_keyfield,
@@ -1243,6 +2256,17 @@ static const struct vcap_field *is2_keyfield_set_map[] = {
};
/* keyfield_set map sizes */
+static int is1_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL] = ARRAY_SIZE(is1_normal_keyfield),
+ [VCAP_KFS_5TUPLE_IP4] = ARRAY_SIZE(is1_5tuple_ip4_keyfield),
+ [VCAP_KFS_NORMAL_IP6] = ARRAY_SIZE(is1_normal_ip6_keyfield),
+ [VCAP_KFS_7TUPLE] = ARRAY_SIZE(is1_7tuple_keyfield),
+ [VCAP_KFS_5TUPLE_IP6] = ARRAY_SIZE(is1_5tuple_ip6_keyfield),
+ [VCAP_KFS_DBL_VID] = ARRAY_SIZE(is1_dbl_vid_keyfield),
+ [VCAP_KFS_RT] = ARRAY_SIZE(is1_rt_keyfield),
+ [VCAP_KFS_DMAC_VID] = ARRAY_SIZE(is1_dmac_vid_keyfield),
+};
+
static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
[VCAP_KFS_MAC_LLC] = ARRAY_SIZE(is2_mac_llc_keyfield),
@@ -1259,6 +2283,154 @@ static int is2_keyfield_set_map_size[] = {
};
/* actionfields */
+static const struct vcap_field is1_s1_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 8,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 9,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 8,
+ },
+ [VCAP_AF_ISDX_REPLACE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 30,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_ADD_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 8,
+ },
+ [VCAP_AF_VID_REPLACE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 39,
+ .width = 1,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 40,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 71,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 72,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 73,
+ .width = 1,
+ },
+ [VCAP_AF_VLAN_POP_CNT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 74,
+ .width = 2,
+ },
+ [VCAP_AF_CUSTOM_ACE_TYPE_ENA] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 76,
+ .width = 4,
+ },
+ [VCAP_AF_SFID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 80,
+ .width = 1,
+ },
+ [VCAP_AF_SFID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 81,
+ .width = 8,
+ },
+ [VCAP_AF_SGID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 89,
+ .width = 1,
+ },
+ [VCAP_AF_SGID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 8,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 98,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 99,
+ .width = 9,
+ },
+ [VCAP_AF_OAM_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_AF_MRP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 2,
+ },
+ [VCAP_AF_DLR_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 113,
+ .width = 2,
+ },
+};
+
static const struct vcap_field is2_base_type_actionfield[] = {
[VCAP_AF_HIT_ME_ONCE] = {
.type = VCAP_FIELD_BIT,
@@ -1351,6 +2523,14 @@ static const struct vcap_field is2_smac_sip_actionfield[] = {
};
/* actionfield_set */
+static const struct vcap_set is1_actionfield_set[] = {
+ [VCAP_AFS_S1] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 4,
+ },
+};
+
static const struct vcap_set is2_actionfield_set[] = {
[VCAP_AFS_BASE_TYPE] = {
.type_id = -1,
@@ -1365,18 +2545,73 @@ static const struct vcap_set is2_actionfield_set[] = {
};
/* actionfield_set map */
+static const struct vcap_field *is1_actionfield_set_map[] = {
+ [VCAP_AFS_S1] = is1_s1_actionfield,
+};
+
static const struct vcap_field *is2_actionfield_set_map[] = {
[VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
[VCAP_AFS_SMAC_SIP] = is2_smac_sip_actionfield,
};
/* actionfield_set map size */
+static int is1_actionfield_set_map_size[] = {
+ [VCAP_AFS_S1] = ARRAY_SIZE(is1_s1_actionfield),
+};
+
static int is2_actionfield_set_map_size[] = {
[VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
[VCAP_AFS_SMAC_SIP] = ARRAY_SIZE(is2_smac_sip_actionfield),
};
/* Type Groups */
+static const struct vcap_typegroup is1_x4_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 192,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 288,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is1_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 96,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is1_x1_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
static const struct vcap_typegroup is2_x4_keyfield_set_typegroups[] = {
{
.offset = 0,
@@ -1424,6 +2659,13 @@ static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup *is1_keyfield_set_typegroups[] = {
+ [4] = is1_x4_keyfield_set_typegroups,
+ [2] = is1_x2_keyfield_set_typegroups,
+ [1] = is1_x1_keyfield_set_typegroups,
+ [5] = NULL,
+};
+
static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
[4] = is2_x4_keyfield_set_typegroups,
[2] = is2_x2_keyfield_set_typegroups,
@@ -1431,6 +2673,10 @@ static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
[5] = NULL,
};
+static const struct vcap_typegroup is1_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
static const struct vcap_typegroup is2_x2_actionfield_set_typegroups[] = {
{
.offset = 0,
@@ -1454,6 +2700,11 @@ static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup *is1_actionfield_set_typegroups[] = {
+ [1] = is1_x1_actionfield_set_typegroups,
+ [5] = NULL,
+};
+
static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
[2] = is2_x2_actionfield_set_typegroups,
[1] = is2_x1_actionfield_set_typegroups,
@@ -1463,16 +2714,33 @@ static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
/* Keyfieldset names */
static const char * const vcap_keyfield_set_names[] = {
[VCAP_KFS_NO_VALUE] = "(None)",
+ [VCAP_KFS_5TUPLE_IP4] = "VCAP_KFS_5TUPLE_IP4",
+ [VCAP_KFS_5TUPLE_IP6] = "VCAP_KFS_5TUPLE_IP6",
+ [VCAP_KFS_7TUPLE] = "VCAP_KFS_7TUPLE",
[VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_DBL_VID] = "VCAP_KFS_DBL_VID",
+ [VCAP_KFS_DMAC_VID] = "VCAP_KFS_DMAC_VID",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
[VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
[VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
[VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
[VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
[VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
+ [VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
[VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
[VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
[VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL] = "VCAP_KFS_NORMAL",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_NORMAL_IP6] = "VCAP_KFS_NORMAL_IP6",
[VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_RT] = "VCAP_KFS_RT",
[VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
[VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
};
@@ -1481,16 +2749,42 @@ static const char * const vcap_keyfield_set_names[] = {
static const char * const vcap_actionfield_set_names[] = {
[VCAP_AFS_NO_VALUE] = "(None)",
[VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_S1] = "VCAP_AFS_S1",
[VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
};
/* Keyfield names */
static const char * const vcap_keyfield_names[] = {
[VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021CB_R_TAGGED_IS] = "8021CB_R_TAGGED_IS",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
[VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
[VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
[VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
+ [VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS] = "8021Q_VLAN_DBL_TAGGED_IS",
[VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
[VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
[VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
[VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
@@ -1498,32 +2792,57 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
[VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
[VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
[VCAP_KF_ETYPE] = "ETYPE",
+ [VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
[VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
[VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
[VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
+ [VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
+ [VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
[VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_PAYLOAD_S1_IP6] = "IP_PAYLOAD_S1_IP6",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
+ [VCAP_KF_ISDX_CLS] = "ISDX_CLS",
[VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
[VCAP_KF_L2_BC_IS] = "L2_BC_IS",
[VCAP_KF_L2_DMAC] = "L2_DMAC",
[VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
+ [VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
[VCAP_KF_L2_LLC] = "L2_LLC",
+ [VCAP_KF_L2_MAC] = "L2_MAC",
[VCAP_KF_L2_MC_IS] = "L2_MC_IS",
[VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
[VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
[VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
[VCAP_KF_L2_SMAC] = "L2_SMAC",
[VCAP_KF_L2_SNAP] = "L2_SNAP",
[VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
+ [VCAP_KF_L3_DST_IS] = "L3_DST_IS",
[VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
+ [VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
[VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
[VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
[VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
[VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
+ [VCAP_KF_L3_IP6_DIP_MSB] = "L3_IP6_DIP_MSB",
[VCAP_KF_L3_IP6_SIP] = "L3_IP6_SIP",
+ [VCAP_KF_L3_IP6_SIP_MSB] = "L3_IP6_SIP_MSB",
[VCAP_KF_L3_IP_PROTO] = "L3_IP_PROTO",
[VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
[VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
+ [VCAP_KF_L3_RT_IS] = "L3_RT_IS",
[VCAP_KF_L3_TOS] = "L3_TOS",
[VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
[VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
@@ -1531,6 +2850,7 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L4_ACK] = "L4_ACK",
[VCAP_KF_L4_DPORT] = "L4_DPORT",
[VCAP_KF_L4_FIN] = "L4_FIN",
+ [VCAP_KF_L4_PAYLOAD] = "L4_PAYLOAD",
[VCAP_KF_L4_PSH] = "L4_PSH",
[VCAP_KF_L4_RNG] = "L4_RNG",
[VCAP_KF_L4_RST] = "L4_RST",
@@ -1540,7 +2860,11 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L4_SYN] = "L4_SYN",
[VCAP_KF_L4_URG] = "L4_URG",
[VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
+ [VCAP_KF_LOOKUP_INDEX] = "LOOKUP_INDEX",
[VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
[VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
[VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
@@ -1549,7 +2873,12 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
[VCAP_KF_OAM_VER] = "OAM_VER",
[VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
+ [VCAP_KF_RT_FRMID] = "RT_FRMID",
+ [VCAP_KF_RT_TYPE] = "RT_TYPE",
+ [VCAP_KF_RT_VLAN_IDX] = "RT_VLAN_IDX",
[VCAP_KF_TCP_IS] = "TCP_IS",
+ [VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
[VCAP_KF_TYPE] = "TYPE",
};
@@ -1557,24 +2886,95 @@ static const char * const vcap_keyfield_names[] = {
static const char * const vcap_actionfield_names[] = {
[VCAP_AF_NO_VALUE] = "(None)",
[VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
+ [VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
[VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
[VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_CUSTOM_ACE_TYPE_ENA] = "CUSTOM_ACE_TYPE_ENA",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DLR_SEL] = "DLR_SEL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
[VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
[VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
[VCAP_AF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
+ [VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ADD_VAL] = "ISDX_ADD_VAL",
[VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_REPLACE_ENA] = "ISDX_REPLACE_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
[VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
[VCAP_AF_MASK_MODE] = "MASK_MODE",
+ [VCAP_AF_MATCH_ID] = "MATCH_ID",
+ [VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
[VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_MRP_SEL] = "MRP_SEL",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_OAM_SEL] = "OAM_SEL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
+ [VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
[VCAP_AF_POLICE_ENA] = "POLICE_ENA",
[VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
[VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
[VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
[VCAP_AF_REW_OP] = "REW_OP",
+ [VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SFID_ENA] = "SFID_ENA",
+ [VCAP_AF_SFID_VAL] = "SFID_VAL",
+ [VCAP_AF_SGID_ENA] = "SGID_ENA",
+ [VCAP_AF_SGID_VAL] = "SGID_VAL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_VID_REPLACE_ENA] = "VID_REPLACE_ENA",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
+ [VCAP_AF_VLAN_POP_CNT] = "VLAN_POP_CNT",
+ [VCAP_AF_VLAN_POP_CNT_ENA] = "VLAN_POP_CNT_ENA",
};
/* VCAPs */
const struct vcap_info lan966x_vcaps[] = {
+ [VCAP_TYPE_IS1] = {
+ .name = "is1",
+ .rows = 192,
+ .sw_count = 4,
+ .sw_width = 96,
+ .sticky_width = 32,
+ .act_width = 123,
+ .default_cnt = 0,
+ .require_cnt_dis = 1,
+ .version = 1,
+ .keyfield_set = is1_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is1_keyfield_set),
+ .actionfield_set = is1_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is1_actionfield_set),
+ .keyfield_set_map = is1_keyfield_set_map,
+ .keyfield_set_map_size = is1_keyfield_set_map_size,
+ .actionfield_set_map = is1_actionfield_set_map,
+ .actionfield_set_map_size = is1_actionfield_set_map_size,
+ .keyfield_set_typegroups = is1_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is1_actionfield_set_typegroups,
+ },
[VCAP_TYPE_IS2] = {
.name = "is2",
.rows = 64,
@@ -1600,7 +3000,7 @@ const struct vcap_info lan966x_vcaps[] = {
const struct vcap_statistics lan966x_vcap_stats = {
.name = "lan966x",
- .count = 1,
+ .count = 2,
.keyfield_set_names = vcap_keyfield_set_names,
.actionfield_set_names = vcap_actionfield_set_names,
.keyfield_names = vcap_keyfield_names,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
index 7a0db58f5513..d90c08cfcf14 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -5,9 +5,124 @@
#include "vcap_api.h"
#include "vcap_api_client.h"
-static void lan966x_vcap_port_keys(struct lan966x_port *port,
- struct vcap_admin *admin,
- struct vcap_output_print *out)
+static void lan966x_vcap_is1_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_CFG_S1_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n other: ");
+		val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, l));
+		switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
+ case VCAP_IS1_PS_OTHER_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_OTHER_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_OTHER_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_OTHER_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n ipv4: ");
+ switch (ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV4_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_IPV4_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_IPV4_5TUPLE_IP4:
+ out->prf(out->dst, "5tuple_ipv4");
+ break;
+ case VCAP_IS1_PS_IPV4_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_IPV4_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV6_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_IPV6_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP4:
+ out->prf(out->dst, "5tuple_ip4");
+ break;
+ case VCAP_IS1_PS_IPV6_NORMAL_IP6:
+ out->prf(out->dst, "normal_ip6");
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP6:
+ out->prf(out->dst, "5tuple_ip6");
+ break;
+ case VCAP_IS1_PS_IPV6_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_IPV6_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+
+ out->prf(out->dst, "\n rt: ");
+ switch (ANA_VCAP_S1_CFG_KEY_RT_CFG_GET(val)) {
+ case VCAP_IS1_PS_RT_NORMAL:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_IS1_PS_RT_7TUPLE:
+ out->prf(out->dst, "7tuple");
+ break;
+ case VCAP_IS1_PS_RT_DBL_VID:
+ out->prf(out->dst, "dbl_vid");
+ break;
+ case VCAP_IS1_PS_RT_DMAC_VID:
+ out->prf(out->dst, "dmac_vid");
+ break;
+ case VCAP_IS1_PS_RT_FOLLOW_OTHER:
+ out->prf(out->dst, "follow_other");
+ break;
+ default:
+ out->prf(out->dst, "-");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
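For reference, this dump produces output along the following lines per port (illustrative only; the interface name is hypothetical and the exact spacing comes from the prf format strings above):

port[0] (eth0):
  state: on
  Lookup 0:
    other: normal
    ipv4: normal
    ipv6: normal
    rt: normal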
+static void lan966x_vcap_is2_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
{
struct lan966x *lan966x = port->lan966x;
u32 val;
@@ -88,7 +203,17 @@ int lan966x_vcap_port_info(struct net_device *dev,
vcap = &vctrl->vcaps[admin->vtype];
out->prf(out->dst, "%s:\n", vcap->name);
- lan966x_vcap_port_keys(port, admin, out);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS2:
+ lan966x_vcap_is2_port_keys(port, admin, out);
+ break;
+ case VCAP_TYPE_IS1:
+ lan966x_vcap_is1_port_keys(port, admin, out);
+ break;
+ default:
+ out->prf(out->dst, " no info\n");
+ break;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index 68f9d69fd37b..7ea8e8633609 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -8,6 +8,7 @@
#define STREAMSIZE (64 * 4)
+#define LAN966X_IS1_LOOKUPS 3
#define LAN966X_IS2_LOOKUPS 2
static struct lan966x_vcap_inst {
@@ -20,6 +21,15 @@ static struct lan966x_vcap_inst {
bool ingress; /* is vcap in the ingress path */
} lan966x_vcap_inst_cfg[] = {
{
+ .vtype = VCAP_TYPE_IS1, /* IS1-0 */
+ .tgt_inst = 1,
+ .lookups = LAN966X_IS1_LOOKUPS,
+ .first_cid = LAN966X_VCAP_CID_IS1_L0,
+ .last_cid = LAN966X_VCAP_CID_IS1_MAX,
+ .count = 768,
+ .ingress = true,
+ },
+ {
.vtype = VCAP_TYPE_IS2, /* IS2-0 */
.tgt_inst = 2,
.lookups = LAN966X_IS2_LOOKUPS,
@@ -72,7 +82,21 @@ static void __lan966x_vcap_range_init(struct lan966x *lan966x,
lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
}
-static int lan966x_vcap_cid_to_lookup(int cid)
+static int lan966x_vcap_is1_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= LAN966X_VCAP_CID_IS1_L1 &&
+ cid < LAN966X_VCAP_CID_IS1_L2)
+ lookup = 1;
+ else if (cid >= LAN966X_VCAP_CID_IS1_L2 &&
+ cid < LAN966X_VCAP_CID_IS1_MAX)
+ lookup = 2;
+
+ return lookup;
+}
+
+static int lan966x_vcap_is2_cid_to_lookup(int cid)
{
if (cid >= LAN966X_VCAP_CID_IS2_L1 &&
cid < LAN966X_VCAP_CID_IS2_MAX)
@@ -81,6 +105,67 @@ static int lan966x_vcap_cid_to_lookup(int cid)
return 0;
}
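Both helpers map a chain id to a lookup index by window; a minimal standalone sketch, assuming each lookup owns one fixed-size chain-id window starting at the instance's first chain id (the window constant below is illustrative, not the driver's):

/* Sketch: window-based cid-to-lookup mapping, clamping out-of-range
 * ids to lookup 0 as the helpers above do.
 */
#define EXAMPLE_CID_WINDOW 1000000 /* assumed per-lookup window size */

static int example_cid_to_lookup(int cid, int first_cid, int lookups)
{
        int lookup = (cid - first_cid) / EXAMPLE_CID_WINDOW;

        if (lookup < 0 || lookup >= lookups)
                return 0;
        return lookup;
}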
+/* Return the list of keysets for the vcap port configuration */
+static int
+lan966x_vcap_is1_get_port_keysets(struct net_device *ndev, int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
+ switch (ANA_VCAP_S1_CFG_KEY_IP4_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV4_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_IPV4_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP4);
+ break;
+ case VCAP_IS1_PS_IPV4_NORMAL:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (ANA_VCAP_S1_CFG_KEY_IP6_CFG_GET(val)) {
+ case VCAP_IS1_PS_IPV6_NORMAL:
+ case VCAP_IS1_PS_IPV6_NORMAL_IP6:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_IP6);
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP6:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP6);
+ break;
+ case VCAP_IS1_PS_IPV6_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_IPV6_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_5TUPLE_IP4);
+ break;
+ case VCAP_IS1_PS_IPV6_DMAC_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_DMAC_VID);
+ break;
+ }
+ }
+
+ switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
+ case VCAP_IS1_PS_OTHER_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_7TUPLE);
+ break;
+ case VCAP_IS1_PS_OTHER_NORMAL:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL);
+ break;
+ }
+
+ return 0;
+}
+
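A sketch of how a caller consumes the list built above, using only APIs visible in this patch (assumes a struct net_device *ndev in scope; array size arbitrary, error handling elided):

enum vcap_keyfield_set keysets[10];
struct vcap_keyset_list kslist = {
        .keysets = keysets,
        .max = ARRAY_SIZE(keysets),
};

/* Collect the IS1 keyset candidates for lookup 0 on IPv4 frames */
if (!lan966x_vcap_is1_get_port_keysets(ndev, 0, &kslist, ETH_P_IP))
        for (int i = 0; i < kslist.cnt; ++i)
                pr_info("candidate keyset: %d\n", kslist.keysets[i]);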
static int
lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
struct vcap_keyset_list *keysetlist,
@@ -180,11 +265,26 @@ lan966x_vcap_validate_keyset(struct net_device *dev,
if (!kslist || kslist->cnt == 0)
return VCAP_KFS_NO_VALUE;
- lookup = lan966x_vcap_cid_to_lookup(rule->vcap_chain_id);
keysetlist.max = ARRAY_SIZE(keysets);
keysetlist.keysets = keysets;
- err = lan966x_vcap_is2_get_port_keysets(dev, lookup, &keysetlist,
- l3_proto);
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ lookup = lan966x_vcap_is1_cid_to_lookup(rule->vcap_chain_id);
+ err = lan966x_vcap_is1_get_port_keysets(dev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = lan966x_vcap_is2_cid_to_lookup(rule->vcap_chain_id);
+ err = lan966x_vcap_is2_get_port_keysets(dev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ return VCAP_KFS_NO_VALUE;
+ }
+
if (err)
return VCAP_KFS_NO_VALUE;
@@ -197,17 +297,32 @@ lan966x_vcap_validate_keyset(struct net_device *dev,
return VCAP_KFS_NO_VALUE;
}
-static bool lan966x_vcap_is_first_chain(struct vcap_rule *rule)
+static bool lan966x_vcap_is2_is_first_chain(struct vcap_rule *rule)
{
return (rule->vcap_chain_id >= LAN966X_VCAP_CID_IS2_L0 &&
rule->vcap_chain_id < LAN966X_VCAP_CID_IS2_L1);
}
-static void lan966x_vcap_add_default_fields(struct net_device *dev,
- struct vcap_admin *admin,
- struct vcap_rule *rule)
+static void lan966x_vcap_is1_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ u32 value, mask;
+ u32 lookup;
+
+ if (vcap_rule_get_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
+ &value, &mask))
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0,
+ ~BIT(port->chip_port));
+
+ lookup = lan966x_vcap_is1_cid_to_lookup(rule->vcap_chain_id);
+ vcap_rule_add_key_u32(rule, VCAP_KF_LOOKUP_INDEX, lookup, 0x3);
+}
+
+static void lan966x_vcap_is2_add_default_fields(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
{
- struct lan966x_port *port = netdev_priv(dev);
u32 value, mask;
if (vcap_rule_get_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK,
@@ -215,7 +330,7 @@ static void lan966x_vcap_add_default_fields(struct net_device *dev,
vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0,
~BIT(port->chip_port));
- if (lan966x_vcap_is_first_chain(rule))
+ if (lan966x_vcap_is2_is_first_chain(rule))
vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
VCAP_BIT_1);
else
@@ -223,6 +338,26 @@ static void lan966x_vcap_add_default_fields(struct net_device *dev,
VCAP_BIT_0);
}
+static void lan966x_vcap_add_default_fields(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ lan966x_vcap_is1_add_default_fields(port, admin, rule);
+ break;
+ case VCAP_TYPE_IS2:
+ lan966x_vcap_is2_add_default_fields(port, admin, rule);
+ break;
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ break;
+ }
+}
+
static void lan966x_vcap_cache_erase(struct vcap_admin *admin)
{
memset(admin->cache.keystream, 0, STREAMSIZE);
@@ -464,8 +599,37 @@ static void lan966x_vcap_block_init(struct lan966x *lan966x,
static void lan966x_vcap_port_key_deselection(struct lan966x *lan966x,
struct vcap_admin *admin)
{
- for (int p = 0; p < lan966x->num_phys_ports; ++p)
- lan_wr(0, lan966x, ANA_VCAP_S2_CFG(p));
+ u32 val;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS1:
+ val = ANA_VCAP_S1_CFG_KEY_IP6_CFG_SET(VCAP_IS1_PS_IPV6_5TUPLE_IP6) |
+ ANA_VCAP_S1_CFG_KEY_IP4_CFG_SET(VCAP_IS1_PS_IPV4_5TUPLE_IP4) |
+ ANA_VCAP_S1_CFG_KEY_OTHER_CFG_SET(VCAP_IS1_PS_OTHER_NORMAL);
+
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ for (int l = 0; l < LAN966X_IS1_LOOKUPS; ++l)
+ lan_wr(val, lan966x, ANA_VCAP_S1_CFG(p, l));
+
+ lan_rmw(ANA_VCAP_CFG_S1_ENA_SET(true),
+ ANA_VCAP_CFG_S1_ENA, lan966x,
+ ANA_VCAP_CFG(p));
+ }
+
+ break;
+ case VCAP_TYPE_IS2:
+ for (int p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(0, lan966x, ANA_VCAP_S2_CFG(p));
+
+ break;
+ default:
+ pr_err("vcap type: %s not supported\n",
+ lan966x_vcaps[admin->vtype].name);
+ break;
+ }
}
int lan966x_vcap_init(struct lan966x *lan966x)
@@ -506,6 +670,10 @@ int lan966x_vcap_init(struct lan966x *lan966x)
lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(true),
ANA_VCAP_S2_CFG_ENA, lan966x,
ANA_VCAP_S2_CFG(lan966x->ports[p]->chip_port));
+
+ lan_rmw(ANA_VCAP_CFG_S1_ENA_SET(true),
+ ANA_VCAP_CFG_S1_ENA, lan966x,
+ ANA_VCAP_CFG(lan966x->ports[p]->chip_port));
}
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 42b77ba9b572..a7edf524eedb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -282,6 +282,7 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
spx5_port->is_mrouter = false;
+ INIT_LIST_HEAD(&spx5_port->tc_templates);
sparx5->ports[config->portno] = spx5_port;
err = sparx5_port_init(sparx5, spx5_port, &config->conf);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 72e7928912eb..62c85463b634 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -192,6 +192,7 @@ struct sparx5_port {
u16 ts_id;
struct sk_buff_head tx_skbs;
bool is_mrouter;
+ struct list_head tc_templates; /* list of TC templates on this port */
};
enum sparx5_core_clockfreq {
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index b36819aafaca..3f87a5285a6d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -28,6 +28,14 @@ struct sparx5_multiple_rules {
struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
+struct sparx5_tc_flower_template {
+ struct list_head list; /* for insertion in the list of templates */
+ int cid; /* chain id */
+ enum vcap_keyfield_set orig; /* keyset used before the template */
+ enum vcap_keyfield_set keyset; /* new keyset used by template */
+ u16 l3_proto; /* protocol specified in the template */
+};
+
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
@@ -382,7 +390,7 @@ static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
/* Find the keysets that the rule can use */
matches.keysets = keysets;
matches.max = ARRAY_SIZE(keysets);
- if (vcap_rule_find_keysets(vrule, &matches) == 0)
+ if (!vcap_rule_find_keysets(vrule, &matches))
return -EINVAL;
/* Find the keysets that the port configuration supports */
@@ -996,6 +1004,73 @@ static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
return err;
}
+/* Remove rule keys that may prevent templates from matching a keyset */
+static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ u16 l3_proto)
+{
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ vcap_rule_rem_key(vrule, VCAP_KF_ETYPE);
+ switch (l3_proto) {
+ case ETH_P_IP:
+ break;
+ case ETH_P_IPV6:
+ vcap_rule_rem_key(vrule, VCAP_KF_IP_SNAP_IS);
+ break;
+ default:
+ break;
+ }
+ break;
+ case VCAP_TYPE_ES2:
+ switch (l3_proto) {
+ case ETH_P_IP:
+ if (vrule->keyset == VCAP_KFS_IP4_OTHER)
+ vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
+ break;
+ case ETH_P_IPV6:
+ if (vrule->keyset == VCAP_KFS_IP6_STD)
+ vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
+ vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
+ break;
+ default:
+ break;
+ }
+ break;
+ case VCAP_TYPE_IS2:
+ switch (l3_proto) {
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static bool sparx5_tc_flower_use_template(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_tc_flower_template *ftp;
+
+ list_for_each_entry(ftp, &port->tc_templates, list) {
+ if (ftp->cid != fco->common.chain_index)
+ continue;
+
+ vcap_set_rule_set_keyset(vrule, ftp->keyset);
+ sparx5_tc_flower_simplify_rule(admin, vrule, ftp->l3_proto);
+ return true;
+ }
+ return false;
+}
+
static int sparx5_tc_flower_replace(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin,
@@ -1122,12 +1197,14 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
goto out;
}
- err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
- state.l3_proto, &multi);
- if (err) {
- NL_SET_ERR_MSG_MOD(fco->common.extack,
- "No matching port keyset for filter protocol and keys");
- goto out;
+ if (!sparx5_tc_flower_use_template(ndev, fco, admin, vrule)) {
+ err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
+ state.l3_proto, &multi);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "No matching port keyset for filter protocol and keys");
+ goto out;
+ }
}
/* provide the l3 protocol to guide the keyset selection */
@@ -1259,6 +1336,120 @@ static int sparx5_tc_flower_stats(struct net_device *ndev,
return err;
}
+static int sparx5_tc_flower_template_create(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = fco,
+ .l3_proto = ETH_P_ALL,
+ .admin = admin,
+ };
+ struct sparx5_tc_flower_template *ftp;
+ struct vcap_keyset_list kslist = {};
+ enum vcap_keyfield_set keysets[10];
+ struct vcap_control *vctrl;
+ struct vcap_rule *vrule;
+ int count, err;
+
+ if (admin->vtype == VCAP_TYPE_ES0) {
+ pr_err("%s:%d: %s\n", __func__, __LINE__,
+ "VCAP does not support templates");
+ return -EINVAL;
+ }
+
+ count = vcap_admin_rule_count(admin, fco->common.chain_index);
+ if (count > 0) {
+ pr_err("%s:%d: %s\n", __func__, __LINE__,
+ "Filters are already present");
+ return -EBUSY;
+ }
+
+ ftp = kzalloc(sizeof(*ftp), GFP_KERNEL);
+ if (!ftp)
+ return -ENOMEM;
+
+ ftp->cid = fco->common.chain_index;
+ ftp->orig = VCAP_KFS_NO_VALUE;
+ ftp->keyset = VCAP_KFS_NO_VALUE;
+
+ vctrl = port->sparx5->vcap_ctrl;
+ vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index,
+ VCAP_USER_TC, fco->common.prio, 0);
+ if (IS_ERR(vrule)) {
+ err = PTR_ERR(vrule);
+ goto err_rule;
+ }
+
+ state.vrule = vrule;
+ state.frule = flow_cls_offload_flow_rule(fco);
+ err = sparx5_tc_use_dissectors(&state, admin, vrule);
+ if (err) {
+ pr_err("%s:%d: key error: %d\n", __func__, __LINE__, err);
+ goto out;
+ }
+
+ ftp->l3_proto = state.l3_proto;
+
+ sparx5_tc_flower_simplify_rule(admin, vrule, state.l3_proto);
+
+ /* Find the keysets that the rule can use */
+ kslist.keysets = keysets;
+ kslist.max = ARRAY_SIZE(keysets);
+ if (!vcap_rule_find_keysets(vrule, &kslist)) {
+ pr_err("%s:%d: %s\n", __func__, __LINE__,
+ "Could not find a suitable keyset");
+ err = -ENOENT;
+ goto out;
+ }
+
+ ftp->keyset = vcap_select_min_rule_keyset(vctrl, admin->vtype, &kslist);
+ kslist.cnt = 0;
+ sparx5_vcap_set_port_keyset(ndev, admin, fco->common.chain_index,
+ state.l3_proto,
+ ftp->keyset,
+ &kslist);
+
+ if (kslist.cnt > 0)
+ ftp->orig = kslist.keysets[0];
+
+ /* Store new template */
+ list_add_tail(&ftp->list, &port->tc_templates);
+ vcap_free_rule(vrule);
+ return 0;
+
+out:
+ vcap_free_rule(vrule);
+err_rule:
+ kfree(ftp);
+ return err;
+}
+
+static int sparx5_tc_flower_template_destroy(struct net_device *ndev,
+ struct flow_cls_offload *fco,
+ struct vcap_admin *admin)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_tc_flower_template *ftp, *tmp;
+ int err = -ENOENT;
+
+ /* Rules using the template are removed by the tc framework */
+ list_for_each_entry_safe(ftp, tmp, &port->tc_templates, list) {
+ if (ftp->cid != fco->common.chain_index)
+ continue;
+
+ sparx5_vcap_set_port_keyset(ndev, admin,
+ fco->common.chain_index,
+ ftp->l3_proto, ftp->orig,
+ NULL);
+ list_del(&ftp->list);
+ kfree(ftp);
+ err = 0;
+ break;
+ }
+ return err;
+}
+
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
bool ingress)
{
@@ -1282,6 +1473,10 @@ int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
return sparx5_tc_flower_destroy(ndev, fco, admin);
case FLOW_CLS_STATS:
return sparx5_tc_flower_stats(ndev, fco, admin);
+ case FLOW_CLS_TMPLT_CREATE:
+ return sparx5_tc_flower_template_create(ndev, fco, admin);
+ case FLOW_CLS_TMPLT_DESTROY:
+ return sparx5_tc_flower_template_destroy(ndev, fco, admin);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
index 07b472c84a47..12722f728ef7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
@@ -198,7 +198,7 @@ static void sparx5_vcap_is2_port_keys(struct sparx5 *sparx5,
out->prf(out->dst, "ip6_std");
break;
case VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER:
- out->prf(out->dst, "ip4_tcp_udp ipv4_other");
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
break;
}
out->prf(out->dst, "\n ipv6_uc: ");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index d0d4e0385ac7..187efa1fc904 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -1519,6 +1519,276 @@ static struct vcap_operations sparx5_vcap_ops = {
.port_info = sparx5_port_info,
};
+static u32 sparx5_vcap_is0_keyset_to_etype_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_NORMAL_7TUPLE:
+ return VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE;
+ case VCAP_KFS_NORMAL_5TUPLE_IP4:
+ return VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4;
+ default:
+ return VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE;
+ }
+}
+
+static void sparx5_vcap_is0_set_port_keyset(struct net_device *ndev, int lookup,
+ enum vcap_keyfield_set keyset,
+ int l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ switch (l3_proto) {
+ case ETH_P_IP:
+ value = sparx5_vcap_is0_keyset_to_etype_ps(keyset);
+ spx5_rmw(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(value),
+ ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ case ETH_P_IPV6:
+ value = sparx5_vcap_is0_keyset_to_etype_ps(keyset);
+ spx5_rmw(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(value),
+ ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ default:
+ value = sparx5_vcap_is0_keyset_to_etype_ps(keyset);
+ spx5_rmw(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(value),
+ ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_arp_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_ARP:
+ return VCAP_IS2_PS_ARP_ARP;
+ default:
+ return VCAP_IS2_PS_ARP_MAC_ETYPE;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_ipv4_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_IS2_PS_IPV4_UC_MAC_ETYPE;
+ case VCAP_KFS_IP4_OTHER:
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_IS2_PS_IPV4_UC_IP_7TUPLE;
+ default:
+ return VCAP_KFS_NO_VALUE;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_ipv6_uc_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_IS2_PS_IPV6_UC_MAC_ETYPE;
+ case VCAP_KFS_IP4_OTHER:
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_IS2_PS_IPV6_UC_IP_7TUPLE;
+ default:
+ return VCAP_KFS_NO_VALUE;
+ }
+}
+
+static u32 sparx5_vcap_is2_keyset_to_ipv6_mc_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_IS2_PS_IPV6_MC_MAC_ETYPE;
+ case VCAP_KFS_IP4_OTHER:
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_IS2_PS_IPV6_MC_IP_7TUPLE;
+ default:
+ return VCAP_KFS_NO_VALUE;
+ }
+}
+
+static void sparx5_vcap_is2_set_port_keyset(struct net_device *ndev, int lookup,
+ enum vcap_keyfield_set keyset,
+ int l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ switch (l3_proto) {
+ case ETH_P_ARP:
+ value = sparx5_vcap_is2_keyset_to_arp_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_IP:
+ value = sparx5_vcap_is2_keyset_to_ipv4_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_IPV6:
+ value = sparx5_vcap_is2_keyset_to_ipv6_uc_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ value = sparx5_vcap_is2_keyset_to_ipv6_mc_ps(keyset);
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ default:
+ value = VCAP_IS2_PS_NONETH_MAC_ETYPE;
+ spx5_rmw(ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(value),
+ ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL,
+ sparx5,
+ ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
+ break;
+ }
+}
+
+static u32 sparx5_vcap_es2_keyset_to_arp_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_ARP:
+ return VCAP_ES2_PS_ARP_ARP;
+ default:
+ return VCAP_ES2_PS_ARP_MAC_ETYPE;
+ }
+}
+
+static u32 sparx5_vcap_es2_keyset_to_ipv4_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_ES2_PS_IPV4_MAC_ETYPE;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_ES2_PS_IPV4_IP_7TUPLE;
+ case VCAP_KFS_IP4_TCP_UDP:
+ return VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER;
+ case VCAP_KFS_IP4_OTHER:
+ return VCAP_ES2_PS_IPV4_IP4_OTHER;
+ default:
+ return VCAP_ES2_PS_IPV4_MAC_ETYPE;
+ }
+}
+
+static u32 sparx5_vcap_es2_keyset_to_ipv6_ps(enum vcap_keyfield_set keyset)
+{
+ switch (keyset) {
+ case VCAP_KFS_MAC_ETYPE:
+ return VCAP_ES2_PS_IPV6_MAC_ETYPE;
+ case VCAP_KFS_IP4_TCP_UDP:
+ case VCAP_KFS_IP4_OTHER:
+ return VCAP_ES2_PS_IPV6_IP4_DOWNGRADE;
+ case VCAP_KFS_IP_7TUPLE:
+ return VCAP_ES2_PS_IPV6_IP_7TUPLE;
+ case VCAP_KFS_IP6_STD:
+ return VCAP_ES2_PS_IPV6_IP6_STD;
+ default:
+ return VCAP_ES2_PS_IPV6_MAC_ETYPE;
+ }
+}
+
+static void sparx5_vcap_es2_set_port_keyset(struct net_device *ndev, int lookup,
+ enum vcap_keyfield_set keyset,
+ int l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ switch (l3_proto) {
+ case ETH_P_IP:
+ value = sparx5_vcap_es2_keyset_to_ipv4_ps(keyset);
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(value),
+ EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_IPV6:
+ value = sparx5_vcap_es2_keyset_to_ipv6_ps(keyset);
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(value),
+ EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ case ETH_P_ARP:
+ value = sparx5_vcap_es2_keyset_to_arp_ps(keyset);
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(value),
+ EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ }
+}
+
+/* Change the port keyset for the lookup and protocol */
+void sparx5_vcap_set_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin,
+ int cid,
+ u16 l3_proto,
+ enum vcap_keyfield_set keyset,
+ struct vcap_keyset_list *orig)
+{
+ struct sparx5_port *port;
+ int lookup;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(cid);
+ if (orig)
+ sparx5_vcap_is0_get_port_keysets(ndev, lookup, orig,
+ l3_proto);
+ sparx5_vcap_is0_set_port_keyset(ndev, lookup, keyset, l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(cid);
+ if (orig)
+ sparx5_vcap_is2_get_port_keysets(ndev, lookup, orig,
+ l3_proto);
+ sparx5_vcap_is2_set_port_keyset(ndev, lookup, keyset, l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(cid);
+ if (orig)
+ sparx5_vcap_es2_get_port_keysets(ndev, lookup, orig,
+ l3_proto);
+ sparx5_vcap_es2_set_port_keyset(ndev, lookup, keyset, l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
+}
+
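The orig parameter makes this a save/restore pair; a sketch of the pattern the sparx5 template code above relies on (variable names hypothetical):

/* Sketch: switch the port to a template keyset, remembering the old
 * one; restore it on template destroy. One slot suffices because only
 * the first returned keyset is kept (see ftp->orig earlier).
 */
enum vcap_keyfield_set saved[1];
struct vcap_keyset_list orig = { .keysets = saved, .max = 1 };

sparx5_vcap_set_port_keyset(ndev, admin, cid, l3_proto, new_ks, &orig);
/* ... template lifetime ... */
if (orig.cnt > 0)
        sparx5_vcap_set_port_keyset(ndev, admin, cid, l3_proto,
                                    saved[0], NULL);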
/* Enable IS0 lookups per port and set the keyset generation */
static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
struct vcap_admin *admin)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index 3260ab5e3a82..2684d9199b05 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -195,6 +195,12 @@ int sparx5_vcap_get_port_keyset(struct net_device *ndev,
u16 l3_proto,
struct vcap_keyset_list *kslist);
+/* Change the port keyset for the lookup and protocol */
+void sparx5_vcap_set_port_keyset(struct net_device *ndev,
+ struct vcap_admin *admin, int cid,
+ u16 l3_proto, enum vcap_keyfield_set keyset,
+ struct vcap_keyset_list *orig);
+
/* Check if the ethertype is supported by the vcap port classification */
bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
index 0844fcaeee68..a556c4419986 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -3,8 +3,8 @@
* Microchip VCAP API
*/
-/* This file is autogenerated by cml-utils 2023-02-10 11:15:56 +0100.
- * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+/* This file is autogenerated by cml-utils 2023-02-16 11:41:14 +0100.
+ * Commit ID: be85f176b3a151fa748dcaf97c8824a5c2e065f3
*/
#ifndef __VCAP_AG_API__
@@ -14,6 +14,7 @@ enum vcap_type {
VCAP_TYPE_ES0,
VCAP_TYPE_ES2,
VCAP_TYPE_IS0,
+ VCAP_TYPE_IS1,
VCAP_TYPE_IS2,
VCAP_TYPE_MAX
};
@@ -21,7 +22,12 @@ enum vcap_type {
/* Keyfieldset names with origin information */
enum vcap_keyfield_set {
VCAP_KFS_NO_VALUE, /* initial value */
+ VCAP_KFS_5TUPLE_IP4, /* lan966x is1 X2 */
+ VCAP_KFS_5TUPLE_IP6, /* lan966x is1 X4 */
+ VCAP_KFS_7TUPLE, /* lan966x is1 X4 */
VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_DBL_VID, /* lan966x is1 X1 */
+ VCAP_KFS_DMAC_VID, /* lan966x is1 X1 */
VCAP_KFS_ETAG, /* sparx5 is0 X2 */
VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
@@ -36,10 +42,13 @@ enum vcap_keyfield_set {
VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_MAC_LLC, /* lan966x is2 X2 */
VCAP_KFS_MAC_SNAP, /* lan966x is2 X2 */
+ VCAP_KFS_NORMAL, /* lan966x is1 X2 */
VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */
VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */
+ VCAP_KFS_NORMAL_IP6, /* lan966x is1 X4 */
VCAP_KFS_OAM, /* lan966x is2 X2 */
VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */
+ VCAP_KFS_RT, /* lan966x is1 X1 */
VCAP_KFS_SMAC_SIP4, /* lan966x is2 X1 */
VCAP_KFS_SMAC_SIP6, /* lan966x is2 X2 */
};
@@ -61,17 +70,20 @@ enum vcap_keyfield_set {
* Used by 802.1BR Bridge Port Extension in an E-Tag
* VCAP_KF_8021BR_IGR_ECID_EXT: W8, sparx5: is0
* Used by 802.1BR Bridge Port Extension in an E-Tag
- * VCAP_KF_8021Q_DEI0: W1, sparx5: is0
+ * VCAP_KF_8021CB_R_TAGGED_IS: W1, lan966x: is1
+ * Set if frame contains an RTAG: IEEE 802.1CB (FRER Redundancy tag, Ethertype
+ * 0xf1c1)
+ * VCAP_KF_8021Q_DEI0: W1, sparx5: is0, lan966x: is1
* First DEI in multiple vlan tags (outer tag or default port tag)
- * VCAP_KF_8021Q_DEI1: W1, sparx5: is0
+ * VCAP_KF_8021Q_DEI1: W1, sparx5: is0, lan966x: is1
* Second DEI in multiple vlan tags (inner tag)
* VCAP_KF_8021Q_DEI2: W1, sparx5: is0
* Third DEI in multiple vlan tags (not always available)
* VCAP_KF_8021Q_DEI_CLS: W1, sparx5: is2/es2, lan966x: is2
* Classified DEI
- * VCAP_KF_8021Q_PCP0: W3, sparx5: is0
+ * VCAP_KF_8021Q_PCP0: W3, sparx5: is0, lan966x: is1
* First PCP in multiple vlan tags (outer tag or default port tag)
- * VCAP_KF_8021Q_PCP1: W3, sparx5: is0
+ * VCAP_KF_8021Q_PCP1: W3, sparx5: is0, lan966x: is1
* Second PCP in multiple vlan tags (inner tag)
* VCAP_KF_8021Q_PCP2: W3, sparx5: is0
* Third PCP in multiple vlan tags (not always available)
@@ -79,22 +91,24 @@ enum vcap_keyfield_set {
* Classified PCP
* VCAP_KF_8021Q_TPID: W3, sparx5: es0
* TPID for outer tag: 0: Customer TPID 1: Service TPID (88A8 or programmable)
- * VCAP_KF_8021Q_TPID0: W3, sparx5: is0
+ * VCAP_KF_8021Q_TPID0: sparx5 is0 W3, lan966x is1 W1
 * First TPID in multiple vlan tags (outer tag or default port tag)
- * VCAP_KF_8021Q_TPID1: W3, sparx5: is0
+ * VCAP_KF_8021Q_TPID1: sparx5 is0 W3, lan966x is1 W1
* Second TPID in multiple vlan tags (inner tag)
* VCAP_KF_8021Q_TPID2: W3, sparx5: is0
* Third TPID in multiple vlan tags (not always available)
- * VCAP_KF_8021Q_VID0: W12, sparx5: is0
+ * VCAP_KF_8021Q_VID0: W12, sparx5: is0, lan966x: is1
* First VID in multiple vlan tags (outer tag or default port tag)
- * VCAP_KF_8021Q_VID1: W12, sparx5: is0
+ * VCAP_KF_8021Q_VID1: W12, sparx5: is0, lan966x: is1
* Second VID in multiple vlan tags (inner tag)
* VCAP_KF_8021Q_VID2: W12, sparx5: is0
* Third VID in multiple vlan tags (not always available)
* VCAP_KF_8021Q_VID_CLS: sparx5 is2 W13, sparx5 es0 W13, sparx5 es2 W13,
* lan966x is2 W12
* Classified VID
- * VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2/es2, lan966x: is2
+ * VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS: W1, lan966x: is1
+ * Set if frame has two or more Q-tags. Independent of port VLAN awareness
+ * VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2/es2, lan966x: is1/is2
* Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has
* one or more Q-tags. Independent of port VLAN awareness
* VCAP_KF_8021Q_VLAN_TAGS: W3, sparx5: is0
@@ -120,9 +134,9 @@ enum vcap_keyfield_set {
* Class of service
* VCAP_KF_ES0_ISDX_KEY_ENA: W1, sparx5: es2
* The value taken from the IFH .FWD.ES0_ISDX_KEY_ENA
- * VCAP_KF_ETYPE: W16, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_ETYPE: W16, sparx5: is0/is2/es2, lan966x: is1/is2
* Ethernet type
- * VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is0/is2/es2
+ * VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is0/is2/es2, lan966x: is1
* Set if frame has EtherType >= 0x600
* VCAP_KF_HOST_MATCH: W1, lan966x: is2
* The action from the SMAC_SIP4 or SMAC_SIP6 lookups. Used for IP source
@@ -134,11 +148,12 @@ enum vcap_keyfield_set {
* CPU queue)
* VCAP_KF_IF_EGR_PORT_NO: W7, sparx5: es0
* Egress port number
- * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9, lan966x is2 W4
+ * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9, lan966x is1 W3, lan966x
+ * is2 W4
* Sparx5: Logical ingress port number retrieved from
 * ANA_CL::PORT_ID_CFG.LPORT_NUM or ERLEG, LAN966x: ingress port number
* VCAP_KF_IF_IGR_PORT_MASK: sparx5 is0 W65, sparx5 is2 W32, sparx5 is2 W65,
- * lan966x is2 W9
+ * lan966x is1 W9, lan966x is2 W9
* Ingress port mask, one bit per port/erleg
* VCAP_KF_IF_IGR_PORT_MASK_L3: W1, sparx5: is2
* If set, IF_IGR_PORT_MASK, IF_IGR_PORT_MASK_RNG, and IF_IGR_PORT_MASK_SEL are
@@ -151,24 +166,26 @@ enum vcap_keyfield_set {
* Mapping: 0: DEFAULT 1: LOOPBACK 2: MASQUERADE 3: CPU_VD
* VCAP_KF_IF_IGR_PORT_SEL: W1, sparx5: es2
* Selector for IF_IGR_PORT: physical port number or ERLEG
- * VCAP_KF_IP4_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_IP4_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
* Set if frame has EtherType = 0x800 and IP version = 4
- * VCAP_KF_IP_MC_IS: W1, sparx5: is0
+ * VCAP_KF_IP_MC_IS: W1, sparx5: is0, lan966x: is1
* Set if frame is IPv4 frame and frame's destination MAC address is an IPv4
* multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame's
* destination MAC address is an IPv6 multicast address (0x3333/16).
- * VCAP_KF_IP_PAYLOAD_5TUPLE: W32, sparx5: is0
+ * VCAP_KF_IP_PAYLOAD_5TUPLE: W32, sparx5: is0, lan966x: is1
* Payload bytes after IP header
- * VCAP_KF_IP_SNAP_IS: W1, sparx5: is0
+ * VCAP_KF_IP_PAYLOAD_S1_IP6: W112, lan966x: is1
+ * Payload after IPv6 header
+ * VCAP_KF_IP_SNAP_IS: W1, sparx5: is0, lan966x: is1
* Set if frame is IPv4, IPv6, or SNAP frame
* VCAP_KF_ISDX_CLS: W12, sparx5: is2/es0/es2
* Classified ISDX
* VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es0/es2, lan966x: is2
* Set if classified ISDX > 0
- * VCAP_KF_L2_BC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L2_BC_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
* Set if frame's destination MAC address is the broadcast address
* (FF-FF-FF-FF-FF-FF).
- * VCAP_KF_L2_DMAC: W48, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L2_DMAC: W48, sparx5: is0/is2/es2, lan966x: is1/is2
* Destination MAC address
* VCAP_KF_L2_FRM_TYPE: W4, lan966x: is2
* Frame subtype for specific EtherTypes (MRP, DLR)
@@ -176,7 +193,9 @@ enum vcap_keyfield_set {
* Set if the frame is allowed to be forwarded to front ports
* VCAP_KF_L2_LLC: W40, lan966x: is2
* LLC header and data after up to two VLAN tags and the type/length field
- * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L2_MAC: W48, lan966x: is1
+ * MAC address (FIRST=1: SMAC, FIRST=0: DMAC)
+ * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
* Set if frame's destination MAC address is a multicast address (bit 40 = 1).
* VCAP_KF_L2_PAYLOAD0: W16, lan966x: is2
* Payload bytes 0-1 after the frame's EtherType
@@ -188,7 +207,7 @@ enum vcap_keyfield_set {
* specifically for PTP frames.
* VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2/es2
* Byte 0-7 of L2 payload after Type/Len field and overloading for OAM
- * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x: is1/is2
* Source MAC address
* VCAP_KF_L2_SNAP: W40, lan966x: is2
* SNAP header after LLC header (AA-AA-03)
@@ -196,32 +215,38 @@ enum vcap_keyfield_set {
* Set if Src IP matches Dst IP address
* VCAP_KF_L3_DPL_CLS: W1, sparx5: es0/es2
* The frames drop precedence level
- * VCAP_KF_L3_DSCP: W6, sparx5: is0
+ * VCAP_KF_L3_DSCP: W6, sparx5: is0, lan966x: is1
* Frame's DSCP value
* VCAP_KF_L3_DST_IS: W1, sparx5: is2
* Set if lookup is done for egress router leg
- * VCAP_KF_L3_FRAGMENT: W1, lan966x: is2
+ * VCAP_KF_L3_FRAGMENT: W1, lan966x: is1/is2
* Set if IPv4 frame is fragmented
* VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is0/is2/es2
* L3 Fragmentation type (none, initial, suspicious, valid follow up)
* VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is0/is2
* Set if frame's L4 length is less than ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_L
* EN
- * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is2
+ * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is1/is2
* Set if IPv4 frame is fragmented and it is not the first fragment
- * VCAP_KF_L3_IP4_DIP: W32, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L3_IP4_DIP: W32, sparx5: is0/is2/es2, lan966x: is1/is2
* Destination IPv4 Address
- * VCAP_KF_L3_IP4_SIP: W32, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L3_IP4_SIP: W32, sparx5: is0/is2/es2, lan966x: is1/is2
* Source IPv4 Address
- * VCAP_KF_L3_IP6_DIP: W128, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L3_IP6_DIP: sparx5 is0 W128, sparx5 is2 W128, sparx5 es2 W128,
+ * lan966x is1 W64, lan966x is1 W128, lan966x is2 W128
* Sparx5: Full IPv6 DIP, LAN966x: Either Full IPv6 DIP or a subset depending on
* frame type
- * VCAP_KF_L3_IP6_SIP: W128, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L3_IP6_DIP_MSB: W16, lan966x: is1
+ * MS 16bits of IPv6 DIP
+ * VCAP_KF_L3_IP6_SIP: sparx5 is0 W128, sparx5 is2 W128, sparx5 es2 W128,
+ * lan966x is1 W128, lan966x is1 W64, lan966x is2 W128
* Sparx5: Full IPv6 SIP, LAN966x: Either Full IPv6 SIP or a subset depending on
* frame type
- * VCAP_KF_L3_IP_PROTO: W8, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L3_IP6_SIP_MSB: W16, lan966x: is1
+ * MS 16bits of IPv6 SIP
+ * VCAP_KF_L3_IP_PROTO: W8, sparx5: is0/is2/es2, lan966x: is1/is2
* IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4
- * VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
* Set if IPv4 frame contains options (IP len > 5)
* VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96, sparx5
* es2 W40, lan966x is2 W56
@@ -254,7 +279,8 @@ enum vcap_keyfield_set {
* VCAP_KF_L4_PSH: W1, sparx5: is2/es2, lan966x: is2
* Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit
* 1 (twoStepFlag)
- * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x is2 W8
+ * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x is1
+ * W8, lan966x is2 W8
* Range checker bitmask (one for each range checker). Input into range checkers
* is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE,
* outer VID, inner VID)
@@ -264,7 +290,7 @@ enum vcap_keyfield_set {
* VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP:
* messageType bit 0
- * VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is1/is2
* TCP/UDP source port
* VCAP_KF_L4_SPORT_EQ_DPORT_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if UDP or TCP source port equals UDP or TCP destination port
@@ -274,13 +300,16 @@ enum vcap_keyfield_set {
* VCAP_KF_L4_URG: W1, sparx5: is2/es2, lan966x: is2
* Sparx5: TCP flag URG, LAN966x: TCP: TCP flag URG. PTP over UDP: flagField bit
* 7 (reserved)
- * VCAP_KF_LOOKUP_FIRST_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_LOOKUP_FIRST_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
* Selects between entries relevant for first and second lookup. Set for first
* lookup, cleared for second lookup.
* VCAP_KF_LOOKUP_GEN_IDX: W12, sparx5: is0
* Generic index - for chaining CLM instances
* VCAP_KF_LOOKUP_GEN_IDX_SEL: W2, sparx5: is0
* Select the mode of the Generic Index
+ * VCAP_KF_LOOKUP_INDEX: W2, lan966x: is1
+ * 0: First lookup, 1: Second lookup, 2: Third lookup. Similar to
+ * VCAP_KF_LOOKUP_FIRST_IS but with extra info
* VCAP_KF_LOOKUP_PAG: W8, sparx5: is2, lan966x: is2
* Classified Policy Association Group: chains rules from IS1/CLM to IS2
* VCAP_KF_MIRROR_PROBE: W2, sparx5: es2
@@ -303,14 +332,22 @@ enum vcap_keyfield_set {
* Set if frame's EtherType = 0x8902
* VCAP_KF_PROT_ACTIVE: W1, sparx5: es0/es2
* Protection is active
- * VCAP_KF_TCP_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_RT_FRMID: W32, lan966x: is1
+ * Profinet or OPC-UA FrameId
+ * VCAP_KF_RT_TYPE: W2, lan966x: is1
+ * Encoding of frame's EtherType: 0: Other, 1: Profinet, 2: OPC-UA, 3: Custom
+ * (ANA::RT_CUSTOM)
+ * VCAP_KF_RT_VLAN_IDX: W3, lan966x: is1
+ * Real-time VLAN index from ANA::RT_VLAN_PCP
+ * VCAP_KF_TCP_IS: W1, sparx5: is0/is2/es2, lan966x: is1/is2
* Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next
* header = 6)
- * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2
+ * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2, lan966x: is1
* Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6
* or 17)
* VCAP_KF_TYPE: sparx5 is0 W2, sparx5 is0 W1, sparx5 is2 W4, sparx5 is2 W2,
- * sparx5 es0 W1, sparx5 es2 W3, lan966x is2 W4, lan966x is2 W2
+ * sparx5 es0 W1, sparx5 es2 W3, lan966x is1 W1, lan966x is1 W2, lan966x is2 W4,
+ * lan966x is2 W2
* Keyset type id - set by the API
*/
@@ -323,6 +360,7 @@ enum vcap_key_field {
VCAP_KF_8021BR_GRP,
VCAP_KF_8021BR_IGR_ECID_BASE,
VCAP_KF_8021BR_IGR_ECID_EXT,
+ VCAP_KF_8021CB_R_TAGGED_IS,
VCAP_KF_8021Q_DEI0,
VCAP_KF_8021Q_DEI1,
VCAP_KF_8021Q_DEI2,
@@ -339,6 +377,7 @@ enum vcap_key_field {
VCAP_KF_8021Q_VID1,
VCAP_KF_8021Q_VID2,
VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_8021Q_VLAN_DBL_TAGGED_IS,
VCAP_KF_8021Q_VLAN_TAGGED_IS,
VCAP_KF_8021Q_VLAN_TAGS,
VCAP_KF_ACL_GRP_ID,
@@ -366,6 +405,7 @@ enum vcap_key_field {
VCAP_KF_IP4_IS,
VCAP_KF_IP_MC_IS,
VCAP_KF_IP_PAYLOAD_5TUPLE,
+ VCAP_KF_IP_PAYLOAD_S1_IP6,
VCAP_KF_IP_SNAP_IS,
VCAP_KF_ISDX_CLS,
VCAP_KF_ISDX_GT0_IS,
@@ -374,6 +414,7 @@ enum vcap_key_field {
VCAP_KF_L2_FRM_TYPE,
VCAP_KF_L2_FWD_IS,
VCAP_KF_L2_LLC,
+ VCAP_KF_L2_MAC,
VCAP_KF_L2_MC_IS,
VCAP_KF_L2_PAYLOAD0,
VCAP_KF_L2_PAYLOAD1,
@@ -392,7 +433,9 @@ enum vcap_key_field {
VCAP_KF_L3_IP4_DIP,
VCAP_KF_L3_IP4_SIP,
VCAP_KF_L3_IP6_DIP,
+ VCAP_KF_L3_IP6_DIP_MSB,
VCAP_KF_L3_IP6_SIP,
+ VCAP_KF_L3_IP6_SIP_MSB,
VCAP_KF_L3_IP_PROTO,
VCAP_KF_L3_OPTIONS_IS,
VCAP_KF_L3_PAYLOAD,
@@ -416,6 +459,7 @@ enum vcap_key_field {
VCAP_KF_LOOKUP_FIRST_IS,
VCAP_KF_LOOKUP_GEN_IDX,
VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ VCAP_KF_LOOKUP_INDEX,
VCAP_KF_LOOKUP_PAG,
VCAP_KF_MIRROR_PROBE,
VCAP_KF_OAM_CCM_CNTS_EQ0,
@@ -427,6 +471,9 @@ enum vcap_key_field {
VCAP_KF_OAM_VER,
VCAP_KF_OAM_Y1731_IS,
VCAP_KF_PROT_ACTIVE,
+ VCAP_KF_RT_FRMID,
+ VCAP_KF_RT_TYPE,
+ VCAP_KF_RT_VLAN_IDX,
VCAP_KF_TCP_IS,
VCAP_KF_TCP_UDP_IS,
VCAP_KF_TYPE,
@@ -440,6 +487,7 @@ enum vcap_actionfield_set {
VCAP_AFS_CLASS_REDUCED, /* sparx5 is0 X1 */
VCAP_AFS_ES0, /* sparx5 es0 X1 */
VCAP_AFS_FULL, /* sparx5 is0 X3 */
+ VCAP_AFS_S1, /* lan966x is1 X1 */
VCAP_AFS_SMAC_SIP, /* lan966x is2 X1 */
};
@@ -470,23 +518,31 @@ enum vcap_actionfield_set {
* CPU extraction queue. Used when FWD_SEL >0 and PIPELINE_ACT = XTR.
* VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2/es2, lan966x: is2
* CPU queue number. Used when CPU_COPY_ENA is set.
+ * VCAP_AF_CUSTOM_ACE_TYPE_ENA: W4, lan966x: is1
+ * Enables use of custom keys in IS2. Bits 3:2 control second lookup in IS2
+ * while bits 1:0 control first lookup. Encoding per lookup: 0: Disabled. 1:
+ * Extract 40 bytes after position corresponding to the location of the IPv4
+ * header and use as key. 2: Extract 40 bytes after SMAC and use as key
* VCAP_AF_DEI_A_VAL: W1, sparx5: es0
* DEI used in ES0 tag A. See TAG_A_DEI_SEL.
* VCAP_AF_DEI_B_VAL: W1, sparx5: es0
* DEI used in ES0 tag B. See TAG_B_DEI_SEL.
* VCAP_AF_DEI_C_VAL: W1, sparx5: es0
* DEI used in ES0 tag C. See TAG_C_DEI_SEL.
- * VCAP_AF_DEI_ENA: W1, sparx5: is0
+ * VCAP_AF_DEI_ENA: W1, sparx5: is0, lan966x: is1
* If set, use DEI_VAL as classified DEI value. Otherwise, DEI from basic
* classification is used
- * VCAP_AF_DEI_VAL: W1, sparx5: is0
+ * VCAP_AF_DEI_VAL: W1, sparx5: is0, lan966x: is1
* See DEI_ENA
- * VCAP_AF_DP_ENA: W1, sparx5: is0
+ * VCAP_AF_DLR_SEL: W2, lan966x: is1
+ * 0: No changes to port-based selection in ANA:PORT:OAM_CFG.DLR_ENA. 1: Enable
+ * DLR frame processing 2: Disable DLR processing
+ * VCAP_AF_DP_ENA: W1, sparx5: is0, lan966x: is1
* If set, use DP_VAL as classified drop precedence level. Otherwise, drop
* precedence level from basic classification is used.
- * VCAP_AF_DP_VAL: W2, sparx5: is0
+ * VCAP_AF_DP_VAL: sparx5 is0 W2, lan966x is1 W1
* See DP_ENA.
- * VCAP_AF_DSCP_ENA: W1, sparx5: is0
+ * VCAP_AF_DSCP_ENA: W1, sparx5: is0, lan966x: is1
* If set, use DSCP_VAL as classified DSCP value. Otherwise, DSCP value from
* basic classification is used.
* VCAP_AF_DSCP_SEL: W3, sparx5: es0
@@ -495,7 +551,7 @@ enum vcap_actionfield_set {
* table 0, otherwise use DSCP_VAL. 5: Mapped using mapping table 1, otherwise
* use mapping table 0. 6: Mapped using mapping table 2, otherwise use DSCP_VAL.
* 7: Mapped using mapping table 3, otherwise use mapping table 2
- * VCAP_AF_DSCP_VAL: W6, sparx5: is0/es0
+ * VCAP_AF_DSCP_VAL: W6, sparx5: is0/es0, lan966x: is1
* See DSCP_ENA.
* VCAP_AF_ES2_REW_CMD: W3, sparx5: es2
* Command forwarded to REW: 0: No action. 1: SWAP MAC addresses. 2: Do L2CP
@@ -529,9 +585,16 @@ enum vcap_actionfield_set {
* VCAP_AF_ISDX_ADD_REPLACE_SEL: W1, sparx5: is0
* Controls the classified ISDX. 0: New ISDX = old ISDX + ISDX_VAL. 1: New ISDX
* = ISDX_VAL.
+ * VCAP_AF_ISDX_ADD_VAL: W8, lan966x: is1
+ * If ISDX_REPLACE_ENA is set, ISDX_ADD_VAL is used directly as the new ISDX.
+ * Encoding: ISDX_REPLACE_ENA=0, ISDX_ADD_VAL=0: Disabled. ISDX_REPLACE_ENA=0,
+ * ISDX_ADD_VAL>0: Add value to classified ISDX. ISDX_REPLACE_ENA=1: Replace
+ * with ISDX_ADD_VAL value.
* VCAP_AF_ISDX_ENA: W1, lan966x: is2
* Setting this bit to 1 causes the classified ISDX to be set to the value of
* POLICE_IDX[8:0].
+ * VCAP_AF_ISDX_REPLACE_ENA: W1, lan966x: is1
+ * If set, classified ISDX is set to ISDX_ADD_VAL.
* VCAP_AF_ISDX_VAL: W12, sparx5: is0
* See isdx_add_replace_sel
* VCAP_AF_LOOP_ENA: W1, sparx5: es0
@@ -572,14 +635,22 @@ enum vcap_actionfield_set {
* VCAP_AF_MIRROR_PROBE_ID: W2, sparx5: es2
* Signals a mirror probe to be placed in the IFH. Only possible when FWD_MODE
* is copy. 0: No mirroring. 1-3: Use mirror probe 0-2.
+ * VCAP_AF_MRP_SEL: W2, lan966x: is1
+ * 0: No changes to port-based selection in ANA:PORT:OAM_CFG.MRP_ENA. 1: Enable
+ * MRP frame processing 2: Disable MRP processing
* VCAP_AF_NXT_IDX: W12, sparx5: is0
* Index used as part of key (field G_IDX) in the next lookup.
* VCAP_AF_NXT_IDX_CTRL: W3, sparx5: is0
* Controls the generation of the G_IDX used in the VCAP CLM next lookup
- * VCAP_AF_PAG_OVERRIDE_MASK: W8, sparx5: is0
+ * VCAP_AF_OAM_SEL: W3, lan966x: is1
+ * 0: No changes to port-based selection in ANA:PORT:OAM_CFG.OAM_CFG 1: Enable
+ * OAM frame processing for untagged frames 2: Enable OAM frame processing for
+ * single tagged frames 3: Enable OAM frame processing for double tagged frames
+ * 4: Disable OAM frame processing
+ * VCAP_AF_PAG_OVERRIDE_MASK: W8, sparx5: is0, lan966x: is1
* Bits set in this mask will override PAG_VAL from port profile. New PAG = (PAG
* (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK)
- * VCAP_AF_PAG_VAL: W8, sparx5: is0
+ * VCAP_AF_PAG_VAL: W8, sparx5: is0, lan966x: is1
* See PAG_OVERRIDE_MASK.
* VCAP_AF_PCP_A_VAL: W3, sparx5: es0
* PCP used in ES0 tag A. See TAG_A_PCP_SEL.
@@ -587,10 +658,10 @@ enum vcap_actionfield_set {
* PCP used in ES0 tag B. See TAG_B_PCP_SEL.
* VCAP_AF_PCP_C_VAL: W3, sparx5: es0
* PCP used in ES0 tag C. See TAG_C_PCP_SEL.
- * VCAP_AF_PCP_ENA: W1, sparx5: is0
+ * VCAP_AF_PCP_ENA: W1, sparx5: is0, lan966x: is1
* If set, use PCP_VAL as classified PCP value. Otherwise, PCP from basic
* classification is used.
- * VCAP_AF_PCP_VAL: W3, sparx5: is0
+ * VCAP_AF_PCP_VAL: W3, sparx5: is0, lan966x: is1
* See PCP_ENA.
* VCAP_AF_PIPELINE_ACT: W1, sparx5: es0
* Pipeline action when FWD_SEL > 0. 0: XTR. CPU_QU selects CPU extraction queue
@@ -600,11 +671,11 @@ enum vcap_actionfield_set {
* PIPELINE_PT == NONE. Overrules previous settings of pipeline point.
* VCAP_AF_PIPELINE_PT: sparx5 is2 W5, sparx5 es0 W2
* Pipeline point used if PIPELINE_FORCE_ENA is set
- * VCAP_AF_POLICE_ENA: W1, sparx5: is2/es2, lan966x: is2
- * Setting this bit to 1 causes frames that hit this action to be policed by the
- * ACL policer specified in POLICE_IDX. Only applies to the first lookup.
- * VCAP_AF_POLICE_IDX: sparx5 is2 W6, sparx5 es2 W6, lan966x is2 W9
- * Selects VCAP policer used when policing frames (POLICE_ENA)
+ * VCAP_AF_POLICE_ENA: W1, sparx5: is2/es2, lan966x: is1/is2
+ * If set, POLICE_IDX is used to lookup ANA::POL.
+ * VCAP_AF_POLICE_IDX: sparx5 is2 W6, sparx5 es2 W6, lan966x is1 W9, lan966x is2
+ * W9
+ * Policer index.
* VCAP_AF_POLICE_REMARK: W1, sparx5: es2
* If set, frames exceeding policer rates are marked as yellow but not
* discarded.
@@ -628,16 +699,24 @@ enum vcap_actionfield_set {
* port. 1: ES0 tag A: Push ES0 tag A. No port tag. 2: Force port tag: Always
* push port tag. No ES0 tag A. 3: Force untag: Never push port tag or ES0 tag
* A.
- * VCAP_AF_QOS_ENA: W1, sparx5: is0
+ * VCAP_AF_QOS_ENA: W1, sparx5: is0, lan966x: is1
* If set, use QOS_VAL as classified QoS class. Otherwise, QoS class from basic
* classification is used.
- * VCAP_AF_QOS_VAL: W3, sparx5: is0
+ * VCAP_AF_QOS_VAL: W3, sparx5: is0, lan966x: is1
* See QOS_ENA.
* VCAP_AF_REW_OP: W16, lan966x: is2
* Rewriter operation command.
* VCAP_AF_RT_DIS: W1, sparx5: is2
* If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also
* IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX.
+ * VCAP_AF_SFID_ENA: W1, lan966x: is1
+ * If set, SFID_VAL is used to lookup ANA::SFID.
+ * VCAP_AF_SFID_VAL: W8, lan966x: is1
+ * Stream filter identifier.
+ * VCAP_AF_SGID_ENA: W1, lan966x: is1
+ * If set, SGID_VAL is used to lookup ANA::SGID.
+ * VCAP_AF_SGID_VAL: W8, lan966x: is1
+ * Stream gate identifier.
* VCAP_AF_SWAP_MACS_ENA: W1, sparx5: es0
* This setting is only active when FWD_SEL = 1 or FWD_SEL = 2 and PIPELINE_ACT
* = LBK_ASM. 0: No action. 1: Swap MACs and clear bit 40 in new SMAC.
@@ -686,7 +765,7 @@ enum vcap_actionfield_set {
* VCAP_AF_TAG_C_VID_SEL: W2, sparx5: es0
* Selects VID for ES0 tag C. The resulting VID is termed C-TAG.VID. 0:
* Classified VID. 1: VID_C_VAL. 2: IFH.ENCAP.GVID. 3: Reserved.
- * VCAP_AF_TYPE: W1, sparx5: is0
+ * VCAP_AF_TYPE: W1, sparx5: is0, lan966x: is1
* Actionset type id - Set by the API
* VCAP_AF_UNTAG_VID_ENA: W1, sparx5: es0
* Controls insertion of tag C. Untag or insert mode can be selected. See
@@ -697,8 +776,19 @@ enum vcap_actionfield_set {
* VID used in ES0 tag B. See TAG_B_VID_SEL.
* VCAP_AF_VID_C_VAL: W12, sparx5: es0
* VID used in ES0 tag C. See TAG_C_VID_SEL.
- * VCAP_AF_VID_VAL: W13, sparx5: is0
+ * VCAP_AF_VID_REPLACE_ENA: W1, lan966x: is1
+ * Controls the classified VID: VID_REPLACE_ENA=0: Add VID_ADD_VAL to basic
+ * classified VID and use result as new classified VID. VID_REPLACE_ENA = 1:
+ * Replace basic classified VID with VID_VAL value and use as new classified
+ * VID.
+ * VCAP_AF_VID_VAL: sparx5 is0 W13, lan966x is1 W12
* New VID Value
+ * VCAP_AF_VLAN_POP_CNT: W2, lan966x: is1
+ * See VLAN_POP_CNT_ENA
+ * VCAP_AF_VLAN_POP_CNT_ENA: W1, lan966x: is1
+ * If set, use VLAN_POP_CNT as the number of VLAN tags to pop from the incoming
+ * frame. This number is used by the Rewriter. Otherwise, VLAN_POP_CNT from
+ * ANA:PORT:VLAN_CFG.VLAN_POP_CNT is used
*/
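The PAG override rule documented above is a standard masked merge; a worked example with illustrative values:

/* new PAG = (PAG & ~PAG_OVERRIDE_MASK) | (PAG_VAL & PAG_OVERRIDE_MASK)
 * e.g. pag_in = 0x12, pag_val = 0xf0, mask = 0xf0:
 * (0x12 & 0x0f) | (0xf0 & 0xf0) = 0x02 | 0xf0 = 0xf2
 */
u8 new_pag = (pag_in & ~mask) | (pag_val & mask);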
/* Actionfield names */
@@ -712,11 +802,13 @@ enum vcap_action_field {
VCAP_AF_CPU_COPY_ENA,
VCAP_AF_CPU_QU,
VCAP_AF_CPU_QUEUE_NUM,
+ VCAP_AF_CUSTOM_ACE_TYPE_ENA,
VCAP_AF_DEI_A_VAL,
VCAP_AF_DEI_B_VAL,
VCAP_AF_DEI_C_VAL,
VCAP_AF_DEI_ENA,
VCAP_AF_DEI_VAL,
+ VCAP_AF_DLR_SEL,
VCAP_AF_DP_ENA,
VCAP_AF_DP_VAL,
VCAP_AF_DSCP_ENA,
@@ -732,7 +824,9 @@ enum vcap_action_field {
VCAP_AF_IGNORE_PIPELINE_CTRL,
VCAP_AF_INTR_ENA,
VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_AF_ISDX_ADD_VAL,
VCAP_AF_ISDX_ENA,
+ VCAP_AF_ISDX_REPLACE_ENA,
VCAP_AF_ISDX_VAL,
VCAP_AF_LOOP_ENA,
VCAP_AF_LRN_DIS,
@@ -745,8 +839,10 @@ enum vcap_action_field {
VCAP_AF_MIRROR_ENA,
VCAP_AF_MIRROR_PROBE,
VCAP_AF_MIRROR_PROBE_ID,
+ VCAP_AF_MRP_SEL,
VCAP_AF_NXT_IDX,
VCAP_AF_NXT_IDX_CTRL,
+ VCAP_AF_OAM_SEL,
VCAP_AF_PAG_OVERRIDE_MASK,
VCAP_AF_PAG_VAL,
VCAP_AF_PCP_A_VAL,
@@ -770,6 +866,10 @@ enum vcap_action_field {
VCAP_AF_QOS_VAL,
VCAP_AF_REW_OP,
VCAP_AF_RT_DIS,
+ VCAP_AF_SFID_ENA,
+ VCAP_AF_SFID_VAL,
+ VCAP_AF_SGID_ENA,
+ VCAP_AF_SGID_VAL,
VCAP_AF_SWAP_MACS_ENA,
VCAP_AF_TAG_A_DEI_SEL,
VCAP_AF_TAG_A_PCP_SEL,
@@ -788,7 +888,10 @@ enum vcap_action_field {
VCAP_AF_VID_A_VAL,
VCAP_AF_VID_B_VAL,
VCAP_AF_VID_C_VAL,
+ VCAP_AF_VID_REPLACE_ENA,
VCAP_AF_VID_VAL,
+ VCAP_AF_VLAN_POP_CNT,
+ VCAP_AF_VLAN_POP_CNT_ENA,
};
#endif /* __VCAP_AG_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index 4847d0d99ec9..5675b0962bc3 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -976,6 +976,25 @@ int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie)
}
EXPORT_SYMBOL_GPL(vcap_lookup_rule_by_cookie);
+/* Get number of rules in a vcap instance lookup chain id range */
+int vcap_admin_rule_count(struct vcap_admin *admin, int cid)
+{
+ int max_cid = roundup(cid + 1, VCAP_CID_LOOKUP_SIZE);
+ int min_cid = rounddown(cid, VCAP_CID_LOOKUP_SIZE);
+ struct vcap_rule_internal *elem;
+ int count = 0;
+
+ mutex_lock(&admin->lock);
+ list_for_each_entry(elem, &admin->rules, list) {
+ if (elem->data.vcap_chain_id >= min_cid &&
+ elem->data.vcap_chain_id < max_cid)
+ ++count;
+ }
+ mutex_unlock(&admin->lock);
+ return count;
+}
+EXPORT_SYMBOL_GPL(vcap_admin_rule_count);
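A usage sketch, mirroring the guard in sparx5_tc_flower_template_create() earlier in this series:

/* Sketch: a template may only be installed on a chain with no rules */
if (vcap_admin_rule_count(admin, fco->common.chain_index) > 0)
        return -EBUSY;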
+
/* Make a copy of the rule, shallow or full */
static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
bool full)
@@ -3403,6 +3422,25 @@ int vcap_rule_mod_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
}
EXPORT_SYMBOL_GPL(vcap_rule_mod_key_u32);
+/* Remove a key field with value and mask in the rule */
+int vcap_rule_rem_key(struct vcap_rule *rule, enum vcap_key_field key)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field) {
+ pr_err("%s:%d: key %s is not in the rule\n",
+ __func__, __LINE__, vcap_keyfield_name(ri->vctrl, key));
+ return -EINVAL;
+ }
+ /* Deallocate the key field */
+ list_del(&field->ctrl.list);
+ kfree(field);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vcap_rule_rem_key);
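Typical use is keyset narrowing, as in sparx5_tc_flower_simplify_rule() earlier in this series; a minimal sketch:

/* Sketch: drop TCP_IS so the rule can still match a keyset that lacks
 * it; the return value only tells whether the key was present.
 */
if (vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS))
        pr_debug("TCP_IS was not part of the rule\n");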
+
static int vcap_rule_mod_action(struct vcap_rule *rule,
enum vcap_action_field action,
enum vcap_field_type ftype,
@@ -3475,6 +3513,29 @@ int vcap_filter_rule_keys(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_filter_rule_keys);
+/* Select the keyset from the list that results in the smallest rule size */
+enum vcap_keyfield_set
+vcap_select_min_rule_keyset(struct vcap_control *vctrl,
+ enum vcap_type vtype,
+ struct vcap_keyset_list *kslist)
+{
+ enum vcap_keyfield_set ret = VCAP_KFS_NO_VALUE;
+ const struct vcap_set *kset;
+ int max = 100, idx; /* max tracks the smallest sw_per_item seen */
+
+ for (idx = 0; idx < kslist->cnt; ++idx) {
+ kset = vcap_keyfieldset(vctrl, vtype, kslist->keysets[idx]);
+ if (!kset)
+ continue;
+ if (kset->sw_per_item >= max)
+ continue;
+ max = kset->sw_per_item;
+ ret = kslist->keysets[idx];
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vcap_select_min_rule_keyset);
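A sketch combining this with vcap_rule_find_keysets(), as the sparx5 template-create path above does (array size arbitrary):

/* Sketch: find every keyset the rule's keys fit in, then take the one
 * with the fewest subwords per entry.
 */
enum vcap_keyfield_set keysets[10];
struct vcap_keyset_list kslist = {
        .keysets = keysets,
        .max = ARRAY_SIZE(keysets),
};
enum vcap_keyfield_set ks;

if (!vcap_rule_find_keysets(vrule, &kslist))
        return -ENOENT;

ks = vcap_select_min_rule_keyset(vctrl, admin->vtype, &kslist);
if (ks != VCAP_KFS_NO_VALUE)
        vcap_set_rule_set_keyset(vrule, ks);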
+
/* Make a full copy of an existing rule with a new rule id */
struct vcap_rule *vcap_copy_rule(struct vcap_rule *erule)
{
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 417af9754bcc..d9d1f7c9d762 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -201,6 +201,9 @@ int vcap_rule_add_action_bit(struct vcap_rule *rule,
int vcap_rule_add_action_u32(struct vcap_rule *rule,
enum vcap_action_field action, u32 value);
+/* Get the number of rules in a vcap instance within a lookup chain id range */
+int vcap_admin_rule_count(struct vcap_admin *admin, int cid);
+
/* VCAP rule counter operations */
int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl,
struct vcap_counter *ctr, u64 cookie);
@@ -269,6 +272,14 @@ int vcap_rule_mod_action_u32(struct vcap_rule *rule,
int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
u32 *value, u32 *mask);
+/* Remove a key field with its value and mask from the rule */
+int vcap_rule_rem_key(struct vcap_rule *rule, enum vcap_key_field key);
+
+/* Select the keyset from the list that results in the smallest rule size */
+enum vcap_keyfield_set
+vcap_select_min_rule_keyset(struct vcap_control *vctrl, enum vcap_type vtype,
+ struct vcap_keyset_list *kslist);
+
struct vcap_client_actionfield *
vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act);
#endif /* __VCAP_API_CLIENT__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index 0de3f677135a..b23c11b0647c 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -387,7 +387,7 @@ static const char * const test_admin_info_expect[] = {
"default_cnt: 73\n",
"require_cnt_dis: 0\n",
"version: 1\n",
- "vtype: 3\n",
+ "vtype: 4\n",
"vinst: 0\n",
"ingress: 1\n",
"first_cid: 10000\n",
@@ -435,7 +435,7 @@ static const char * const test_admin_expect[] = {
"default_cnt: 73\n",
"require_cnt_dis: 0\n",
"version: 1\n",
- "vtype: 3\n",
+ "vtype: 4\n",
"vinst: 0\n",
"ingress: 1\n",
"first_cid: 8000000\n",
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index f9b8f372ec8a..8f3f78b68592 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1439,7 +1439,6 @@ free_gc:
release_region:
pci_release_regions(pdev);
disable_dev:
- pci_clear_master(pdev);
pci_disable_device(pdev);
dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
return err;
@@ -1458,7 +1457,6 @@ static void mana_gd_remove(struct pci_dev *pdev)
vfree(gc);
pci_release_regions(pdev);
- pci_clear_master(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 6120f2b6684f..492474b4d8aa 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -156,6 +156,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct mana_txq *txq;
struct mana_cq *cq;
int err, len;
+ u16 ihs;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -166,6 +167,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
+ tx_stats = &txq->stats;
pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -179,10 +181,17 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
- if (pkt_fmt == MANA_SHORT_PKT_FMT)
+ if (pkt_fmt == MANA_SHORT_PKT_FMT) {
pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
- else
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->short_pkt_fmt++;
+ u64_stats_update_end(&tx_stats->syncp);
+ } else {
pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->long_pkt_fmt++;
+ u64_stats_update_end(&tx_stats->syncp);
+ }
pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
pkg.wqe_req.flags = 0;
@@ -232,9 +241,35 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
}
+
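+ /* ihs is the header length in bytes; the TSO byte counters track payload only */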
+ if (skb->encapsulation) {
+ ihs = skb_inner_tcp_all_headers(skb);
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tso_inner_packets++;
+ tx_stats->tso_inner_bytes += skb->len - ihs;
+ u64_stats_update_end(&tx_stats->syncp);
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+ } else {
+ ihs = skb_tcp_all_headers(skb);
+ if (ipv6_has_hopopt_jumbo(skb))
+ ihs -= sizeof(struct hop_jumbo_hdr);
+ }
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->tso_packets++;
+ tx_stats->tso_bytes += skb->len - ihs;
+ u64_stats_update_end(&tx_stats->syncp);
+ }
+
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
csum_type = mana_checksum_info(skb);
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->csum_partial++;
+ u64_stats_update_end(&tx_stats->syncp);
+
if (csum_type == IPPROTO_TCP) {
pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
@@ -254,8 +289,12 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- if (mana_map_skb(skb, apc, &pkg))
+ if (mana_map_skb(skb, apc, &pkg)) {
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->mana_map_err++;
+ u64_stats_update_end(&tx_stats->syncp);
goto free_sgl_ptr;
+ }
skb_queue_tail(&txq->pending_skbs, skb);
@@ -1038,6 +1077,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
if (comp_read < 1)
return;
+ apc->eth_stats.tx_cqes = comp_read;
+
for (i = 0; i < comp_read; i++) {
struct mana_tx_comp_oob *cqe_oob;
@@ -1064,6 +1105,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
case CQE_TX_VLAN_TAGGING_VIOLATION:
WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
cqe_oob->cqe_hdr.cqe_type);
+ apc->eth_stats.tx_cqe_err++;
break;
default:
@@ -1072,6 +1114,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
*/
WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
cqe_oob->cqe_hdr.cqe_type);
+ apc->eth_stats.tx_cqe_unknown_type++;
return;
}
@@ -1118,6 +1161,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
WARN_ON_ONCE(1);
cq->work_done = pkt_transmitted;
+
+ apc->eth_stats.tx_cqes -= pkt_transmitted;
}
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -1252,12 +1297,15 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
struct net_device *ndev = rxq->ndev;
struct mana_recv_buf_oob *rxbuf_oob;
+ struct mana_port_context *apc;
struct device *dev = gc->dev;
void *new_buf, *old_buf;
struct page *new_page;
u32 curr, pktlen;
dma_addr_t da;
+ apc = netdev_priv(ndev);
+
switch (oob->cqe_hdr.cqe_type) {
case CQE_RX_OKAY:
break;
@@ -1270,6 +1318,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
case CQE_RX_COALESCED_4:
netdev_err(ndev, "RX coalescing is unsupported\n");
+ apc->eth_stats.rx_coalesced_err++;
return;
case CQE_RX_OBJECT_FENCE:
@@ -1279,6 +1328,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
default:
netdev_err(ndev, "Unknown RX CQE type = %d\n",
oob->cqe_hdr.cqe_type);
+ apc->eth_stats.rx_cqe_unknown_type++;
return;
}
@@ -1341,11 +1391,15 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
{
struct gdma_comp *comp = cq->gdma_comp_buf;
struct mana_rxq *rxq = cq->rxq;
+ struct mana_port_context *apc;
int comp_read, i;
+ apc = netdev_priv(rxq->ndev);
+
comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+ apc->eth_stats.rx_cqes = comp_read;
rxq->xdp_flush = false;
for (i = 0; i < comp_read; i++) {
@@ -1357,6 +1411,8 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
return;
mana_process_rx_cqe(rxq, cq, &comp[i]);
+
+ apc->eth_stats.rx_cqes--;
}
if (rxq->xdp_flush)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 5b776a33a817..a64c81410dc1 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,15 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+ {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
+ {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+ {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ tx_cqe_unknown_type)},
+ {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
+ {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+ rx_coalesced_err)},
+ {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ rx_cqe_unknown_type)},
};
static int mana_get_sset_count(struct net_device *ndev, int stringset)
@@ -23,7 +32,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
+ return ARRAY_SIZE(mana_eth_stats) + num_queues *
+ (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -61,6 +71,22 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
sprintf(p, "tx_%d_xdp_xmit", i);
p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_tso_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_tso_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_tso_inner_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_tso_inner_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_long_pkt_fmt", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_short_pkt_fmt", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_csum_partial", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_%d_mana_map_err", i);
+ p += ETH_GSTRING_LEN;
}
}
@@ -78,6 +104,14 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
u64 xdp_xmit;
u64 xdp_drop;
u64 xdp_tx;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
+ u64 long_pkt_fmt;
+ u64 short_pkt_fmt;
+ u64 csum_partial;
+ u64 mana_map_err;
int q, i = 0;
if (!apc->port_is_up)
@@ -113,11 +147,27 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
packets = tx_stats->packets;
bytes = tx_stats->bytes;
xdp_xmit = tx_stats->xdp_xmit;
+ tso_packets = tx_stats->tso_packets;
+ tso_bytes = tx_stats->tso_bytes;
+ tso_inner_packets = tx_stats->tso_inner_packets;
+ tso_inner_bytes = tx_stats->tso_inner_bytes;
+ long_pkt_fmt = tx_stats->long_pkt_fmt;
+ short_pkt_fmt = tx_stats->short_pkt_fmt;
+ csum_partial = tx_stats->csum_partial;
+ mana_map_err = tx_stats->mana_map_err;
} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
data[i++] = xdp_xmit;
+ data[i++] = tso_packets;
+ data[i++] = tso_bytes;
+ data[i++] = tso_inner_packets;
+ data[i++] = tso_inner_bytes;
+ data[i++] = long_pkt_fmt;
+ data[i++] = short_pkt_fmt;
+ data[i++] = csum_partial;
+ data[i++] = mana_map_err;
}
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 08acb7b89086..1502bb2c8ea7 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -7,6 +7,8 @@
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <soc/mscc/ocelot_hsio.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
@@ -211,6 +213,36 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
+void ocelot_pll5_init(struct ocelot *ocelot)
+{
+ /* Configure PLL5. This should eventually be handled by a proper CCF
+ * driver; the values come from the VTSS API for Ocelot.
+ */
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+ HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+ HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+ HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+ HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
+ HSIO_PLL5G_CFG0_ENA_BIAS |
+ HSIO_PLL5G_CFG0_ENA_VCO_BUF |
+ HSIO_PLL5G_CFG0_ENA_CP1 |
+ HSIO_PLL5G_CFG0_SELCPI(2) |
+ HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
+ HSIO_PLL5G_CFG0_SELBGV820(4) |
+ HSIO_PLL5G_CFG0_DIV4 |
+ HSIO_PLL5G_CFG0_ENA_CLKTREE |
+ HSIO_PLL5G_CFG0_ENA_LANE);
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+ HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+ HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
+ HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
+ HSIO_PLL5G_CFG2_ENA_AMPCTRL |
+ HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
+ HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
+}
+EXPORT_SYMBOL(ocelot_pll5_init);
+
static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
{
ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
@@ -778,6 +810,71 @@ static int ocelot_port_flush(struct ocelot *ocelot, int port)
return err;
}
+int ocelot_port_configure_serdes(struct ocelot *ocelot, int port,
+ struct device_node *portnp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct device *dev = ocelot->dev;
+ int err;
+
+ /* Ensure clock signals and speed are set on all QSGMII links */
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_QSGMII)
+ ocelot_port_rmwl(ocelot_port, 0,
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST,
+ DEV_CLOCK_CFG);
+
+ if (ocelot_port->phy_mode != PHY_INTERFACE_MODE_INTERNAL) {
+ struct phy *serdes = of_phy_get(portnp, NULL);
+
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ dev_err_probe(dev, err,
+ "missing SerDes phys for port %d\n",
+ port);
+ return err;
+ }
+
+ err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
+ ocelot_port->phy_mode);
+ of_phy_put(serdes);
+ if (err) {
+ dev_err(dev, "Could not SerDes mode on port %d: %pe\n",
+ port, ERR_PTR(err));
+ return err;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_configure_serdes);
+
+void ocelot_phylink_mac_config(struct ocelot *ocelot, int port,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+}
+EXPORT_SYMBOL_GPL(ocelot_phylink_mac_config);
+
void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
unsigned int link_an_mode,
phy_interface_t interface,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index ca4bde861397..21a87a3fc556 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1675,25 +1675,10 @@ static void vsc7514_phylink_mac_config(struct phylink_config *config,
{
struct net_device *ndev = to_net_dev(config->dev);
struct ocelot_port_private *priv = netdev_priv(ndev);
- struct ocelot_port *ocelot_port = &priv->port;
-
- /* Disable HDX fast control */
- ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
- DEV_PORT_MISC);
-
- /* SGMII only for now */
- ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
- PCS1G_MODE_CFG);
- ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
-
- /* Enable PCS */
- ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
-
- /* No aneg on SGMII */
- ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
- /* No loopback */
- ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
+ ocelot_phylink_mac_config(ocelot, port, link_an_mode, state);
}
static void vsc7514_phylink_mac_link_down(struct phylink_config *config,
@@ -1757,34 +1742,11 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
return -EINVAL;
}
- /* Ensure clock signals and speed are set on all QSGMII links */
- if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
- ocelot_port_rmwl(ocelot_port, 0,
- DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_RX_RST,
- DEV_CLOCK_CFG);
-
ocelot_port->phy_mode = phy_mode;
- if (phy_mode != PHY_INTERFACE_MODE_INTERNAL) {
- struct phy *serdes = of_phy_get(portnp, NULL);
-
- if (IS_ERR(serdes)) {
- err = PTR_ERR(serdes);
- dev_err_probe(dev, err,
- "missing SerDes phys for port %d\n",
- port);
- return err;
- }
-
- err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET, phy_mode);
- of_phy_put(serdes);
- if (err) {
- dev_err(dev, "Could not SerDes mode on port %d: %pe\n",
- port, ERR_PTR(err));
- return err;
- }
- }
+ err = ocelot_port_configure_serdes(ocelot, port, portnp);
+ if (err)
+ return err;
priv = container_of(ocelot_port, struct ocelot_port_private, port);
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 7388c3b0535c..97e90e2869d4 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -18,7 +18,6 @@
#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_vcap.h>
-#include <soc/mscc/ocelot_hsio.h>
#include <soc/mscc/vsc7514_regs.h>
#include "ocelot_fdma.h"
#include "ocelot.h"
@@ -26,35 +25,6 @@
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
-static void ocelot_pll5_init(struct ocelot *ocelot)
-{
- /* Configure PLL5. This will need a proper CCF driver
- * The values are coming from the VTSS API for Ocelot
- */
- regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
- HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
- HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
- regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
- HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
- HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
- HSIO_PLL5G_CFG0_ENA_BIAS |
- HSIO_PLL5G_CFG0_ENA_VCO_BUF |
- HSIO_PLL5G_CFG0_ENA_CP1 |
- HSIO_PLL5G_CFG0_SELCPI(2) |
- HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
- HSIO_PLL5G_CFG0_SELBGV820(4) |
- HSIO_PLL5G_CFG0_DIV4 |
- HSIO_PLL5G_CFG0_ENA_CLKTREE |
- HSIO_PLL5G_CFG0_ENA_LANE);
- regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
- HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
- HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
- HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
- HSIO_PLL5G_CFG2_ENA_AMPCTRL |
- HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
- HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
-}
-
static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index d23830b5bcb8..73032173ac4e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -55,9 +55,21 @@ static void *get_hashentry(struct rhashtable *ht, void *key,
bool is_pre_ct_flow(struct flow_cls_offload *flow)
{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
struct flow_action_entry *act;
+ struct flow_match_ct ct;
int i;
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ flow_rule_match_ct(rule, &ct);
+ if (ct.key->ct_state)
+ return false;
+ }
+
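+ /* A pre_ct flow starts the recirculation chain, so it must be on chain 0 */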
+ if (flow->common.chain_index)
+ return false;
+
flow_action_for_each(i, act, &flow->rule->action) {
if (act->id == FLOW_ACTION_CT) {
/* The pre_ct rule only has the ct or ct nat action, cannot
@@ -82,24 +94,23 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
struct flow_match_ct ct;
int i;
- /* post ct entry cannot contains any ct action except ct_clear. */
- flow_action_for_each(i, act, &flow->rule->action) {
- if (act->id == FLOW_ACTION_CT) {
- /* ignore ct clear action. */
- if (act->ct.action == TCA_CT_ACT_CLEAR) {
- exist_ct_clear = true;
- continue;
- }
-
- return false;
- }
- }
-
if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
flow_rule_match_ct(rule, &ct);
if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
return true;
} else {
+ /* A post ct entry cannot contain any ct action except ct_clear. */
+ flow_action_for_each(i, act, &flow->rule->action) {
+ if (act->id == FLOW_ACTION_CT) {
+ /* ignore ct clear action. */
+ if (act->ct.action == TCA_CT_ACT_CLEAR) {
+ exist_ct_clear = true;
+ continue;
+ }
+
+ return false;
+ }
+ }
/* When doing NAT with ct, the post ct entry ignores the ct status and
* matches the nat fields (sip/dip) instead. In this situation,
* the flow chain index is not zero and contains a ct clear action.
@@ -511,6 +522,21 @@ static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
return 0;
}
+/* Extra check for merges across multiple ct zones;
+ * currently supports checking nft entry merges in different zones.
+ */
+static int nfp_ct_merge_extra_check(struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_tc_merge *tc_m_entry)
+{
+ struct nfp_fl_nft_tc_merge *prev_nft_m_entry;
+ struct nfp_fl_ct_flow_entry *pre_ct_entry;
+
+ pre_ct_entry = tc_m_entry->pre_ct_parent;
+ prev_nft_m_entry = pre_ct_entry->prev_m_entries[pre_ct_entry->num_prev_m_entries - 1];
+
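+ /* Check that the new nft entry can merge with the last zone's nft rule */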
+ return nfp_ct_merge_check(prev_nft_m_entry->nft_parent, nft_entry);
+}
+
static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
struct nfp_fl_ct_flow_entry *post_ct_entry,
struct nfp_fl_ct_flow_entry *nft_entry)
@@ -682,34 +708,34 @@ static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u3
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
struct nfp_flower_priv *priv,
struct net_device *netdev,
- struct nfp_fl_payload *flow_pay)
+ struct nfp_fl_payload *flow_pay,
+ int num_rules)
{
enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
struct flow_action_entry *a_in;
- int i, j, num_actions, id;
+ int i, j, id, num_actions = 0;
struct flow_rule *a_rule;
int err = 0, offset = 0;
- num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
- rules[CT_TYPE_NFT]->action.num_entries +
- rules[CT_TYPE_POST_CT]->action.num_entries;
+ for (i = 0; i < num_rules; i++)
+ num_actions += rules[i]->action.num_entries;
/* Reserve extra room (one action per pre_ct/nft rule pair) for the
* checksum actions that may be added when doing NAT.
*/
- a_rule = flow_rule_alloc(num_actions + 1);
+ a_rule = flow_rule_alloc(num_actions + (num_rules / 2));
if (!a_rule)
return -ENOMEM;
- /* Actions need a BASIC dissector. */
- a_rule->match = rules[CT_TYPE_PRE_CT]->match;
/* The post_ct entry has at least one action. */
- if (rules[CT_TYPE_POST_CT]->action.num_entries != 0) {
- tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats;
- }
+ if (rules[num_rules - 1]->action.num_entries != 0)
+ tmp_stats = rules[num_rules - 1]->action.entries[0].hw_stats;
+
+ /* Actions need a BASIC dissector. */
+ a_rule->match = rules[0]->match;
/* Copy actions */
- for (j = 0; j < _CT_TYPE_MAX; j++) {
+ for (j = 0; j < num_rules; j++) {
u32 csum_updated = 0;
u8 ip_proto = 0;
@@ -747,8 +773,9 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
/* nft entry is generated by tc ct; its mangle actions do not care
* about stats, so inherit the post entry stats to meet the
* flow_action_hw_stats_check.
+ * nft entry flow rules are at odd array indexes.
*/
- if (j == CT_TYPE_NFT) {
+ if (j & 0x01) {
if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
a_in->hw_stats = tmp_stats;
nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
@@ -784,32 +811,40 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
+ struct flow_rule *rules[NFP_MAX_ENTRY_RULES];
+ struct nfp_fl_ct_flow_entry *pre_ct_entry;
struct nfp_fl_key_ls key_layer, tmp_layer;
struct nfp_flower_priv *priv = zt->priv;
u16 key_map[_FLOW_PAY_LAYERS_MAX];
struct nfp_fl_payload *flow_pay;
-
- struct flow_rule *rules[_CT_TYPE_MAX];
u8 *key, *msk, *kdata, *mdata;
struct nfp_port *port = NULL;
+ int num_rules, err, i, j = 0;
struct net_device *netdev;
bool qinq_sup;
u32 port_id;
u16 offset;
- int i, err;
netdev = m_entry->netdev;
qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
- rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule;
- rules[CT_TYPE_NFT] = m_entry->nft_parent->rule;
- rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule;
+ pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
+ num_rules = pre_ct_entry->num_prev_m_entries * 2 + _CT_TYPE_MAX;
+
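+ /* Each previous zone contributes a pre_ct/nft rule pair; the current
+ * zone adds its own pre_ct, nft and post_ct rules (_CT_TYPE_MAX).
+ */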
+ for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++) {
+ rules[j++] = pre_ct_entry->prev_m_entries[i]->tc_m_parent->pre_ct_parent->rule;
+ rules[j++] = pre_ct_entry->prev_m_entries[i]->nft_parent->rule;
+ }
+
+ rules[j++] = m_entry->tc_m_parent->pre_ct_parent->rule;
+ rules[j++] = m_entry->nft_parent->rule;
+ rules[j++] = m_entry->tc_m_parent->post_ct_parent->rule;
memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
memset(&key_map, 0, sizeof(key_map));
/* Calculate the resultant key layer and size for offload */
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
err = nfp_flower_calculate_key_layers(priv->app,
m_entry->netdev,
&tmp_layer, rules[i],
@@ -875,7 +910,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
* that the layer is not present.
*/
if (!qinq_sup) {
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
offset = key_map[FLOW_PAY_META_TCI];
key = kdata + offset;
msk = mdata + offset;
@@ -889,7 +924,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_MAC_MPLS];
key = kdata + offset;
msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
(struct nfp_flower_mac_mpls *)msk,
rules[i]);
@@ -905,7 +940,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_IPV4];
key = kdata + offset;
msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
(struct nfp_flower_ipv4 *)msk,
rules[i]);
@@ -916,7 +951,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_IPV6];
key = kdata + offset;
msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
(struct nfp_flower_ipv6 *)msk,
rules[i]);
@@ -927,7 +962,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_L4];
key = kdata + offset;
msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
(struct nfp_flower_tp_ports *)msk,
rules[i]);
@@ -938,7 +973,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_QINQ];
key = kdata + offset;
msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
(struct nfp_flower_vlan *)msk,
rules[i]);
@@ -954,7 +989,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
struct nfp_ipv6_addr_entry *entry;
struct in6_addr *dst;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv6_gre_tun((void *)key,
(void *)msk, rules[i]);
}
@@ -971,7 +1006,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
} else {
__be32 dst;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv4_gre_tun((void *)key,
(void *)msk, rules[i]);
}
@@ -995,7 +1030,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
struct nfp_ipv6_addr_entry *entry;
struct in6_addr *dst;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv6_udp_tun((void *)key,
(void *)msk, rules[i]);
}
@@ -1012,7 +1047,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
} else {
__be32 dst;
- for (i = 0; i < _CT_TYPE_MAX; i++) {
+ for (i = 0; i < num_rules; i++) {
nfp_flower_compile_ipv4_udp_tun((void *)key,
(void *)msk, rules[i]);
}
@@ -1029,13 +1064,13 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
offset = key_map[FLOW_PAY_GENEVE_OPT];
key = kdata + offset;
msk = mdata + offset;
- for (i = 0; i < _CT_TYPE_MAX; i++)
+ for (i = 0; i < num_rules; i++)
nfp_flower_compile_geneve_opt(key, msk, rules[i]);
}
}
/* Merge actions into flow_pay */
- err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
+ err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay, num_rules);
if (err)
goto ct_offload_err;
@@ -1168,6 +1203,12 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
if (err)
return err;
+ if (pre_ct_entry->num_prev_m_entries > 0) {
+ err = nfp_ct_merge_extra_check(nft_entry, tc_m_entry);
+ if (err)
+ return err;
+ }
+
/* Combine tc_merge and nft cookies for this cookie. */
new_cookie[0] = tc_m_entry->cookie[0];
new_cookie[1] = tc_m_entry->cookie[1];
@@ -1198,11 +1239,6 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);
- /* Generate offload structure and send to nfp */
- err = nfp_fl_ct_add_offload(nft_m_entry);
- if (err)
- goto err_nft_ct_offload;
-
err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
nfp_nft_ct_merge_params);
if (err)
@@ -1210,12 +1246,20 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
zt->nft_merge_count++;
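+ /* A post_ct rule with a goto action chains into another ct zone;
+ * build the next zone's pre_ct entry instead of offloading now.
+ */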
+ if (post_ct_entry->goto_chain_index > 0)
+ return nfp_fl_create_new_pre_ct(nft_m_entry);
+
+ /* Generate offload structure and send to nfp */
+ err = nfp_fl_ct_add_offload(nft_m_entry);
+ if (err)
+ goto err_nft_ct_offload;
+
return err;
-err_nft_ct_merge_insert:
+err_nft_ct_offload:
nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
nft_m_entry->netdev);
-err_nft_ct_offload:
+err_nft_ct_merge_insert:
list_del(&nft_m_entry->tc_merge_list);
list_del(&nft_m_entry->nft_flow_list);
kfree(nft_m_entry);
@@ -1243,7 +1287,7 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
/* Checks that the chain_index of the filter matches the
* chain_index of the GOTO action.
*/
- if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
+ if (post_ct_entry->chain_index != pre_ct_entry->goto_chain_index)
return -EINVAL;
err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
@@ -1461,7 +1505,7 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
entry->zt = zt;
entry->netdev = netdev;
- entry->cookie = flow->cookie;
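+ /* Internally created entries pass cookie 0; use the entry address instead */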
+ entry->cookie = flow->cookie > 0 ? flow->cookie : (unsigned long)entry;
entry->chain_index = flow->common.chain_index;
entry->tun_offset = NFP_FL_CT_NO_TUN;
@@ -1501,6 +1545,9 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
INIT_LIST_HEAD(&entry->children);
+ if (flow->cookie == 0)
+ return entry;
+
/* Now add a ct map entry to flower-priv */
map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
nfp_ct_map_params, sizeof(*map));
@@ -1559,6 +1606,14 @@ static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
list_del(&m_entry->tc_merge_list);
list_del(&m_entry->nft_flow_list);
+ if (m_entry->next_pre_ct_entry) {
+ struct nfp_fl_ct_map_entry pre_ct_map_ent;
+
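+ /* The internally created next-zone pre_ct entry has no ct map entry;
+ * cookie 0 makes nfp_fl_ct_del_flow() skip the map table removal.
+ */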
+ pre_ct_map_ent.ct_entry = m_entry->next_pre_ct_entry;
+ pre_ct_map_ent.cookie = 0;
+ nfp_fl_ct_del_flow(&pre_ct_map_ent);
+ }
+
kfree(m_entry);
}
@@ -1656,6 +1711,22 @@ void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
kfree(entry);
}
+static struct flow_action_entry *get_flow_act_ct(struct flow_rule *rule)
+{
+ struct flow_action_entry *act;
+ int i;
+
+ /* More than one ct action may be present in a flow rule;
+ * return the first one that is not a ct clear action.
+ */
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == FLOW_ACTION_CT && act->ct.action != TCA_CT_ACT_CLEAR)
+ return act;
+ }
+
+ return NULL;
+}
+
static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
enum flow_action_id act_id)
{
@@ -1713,14 +1784,15 @@ nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
struct net_device *netdev,
struct flow_cls_offload *flow,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ struct nfp_fl_nft_tc_merge *m_entry)
{
struct flow_action_entry *ct_act, *ct_goto;
struct nfp_fl_ct_flow_entry *ct_entry;
struct nfp_fl_ct_zone_entry *zt;
int err;
- ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
+ ct_act = get_flow_act_ct(flow->rule);
if (!ct_act) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: Conntrack action empty in conntrack offload");
@@ -1756,7 +1828,22 @@ int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
if (IS_ERR(ct_entry))
return PTR_ERR(ct_entry);
ct_entry->type = CT_TYPE_PRE_CT;
- ct_entry->chain_index = ct_goto->chain_index;
+ ct_entry->chain_index = flow->common.chain_index;
+ ct_entry->goto_chain_index = ct_goto->chain_index;
+
+ if (m_entry) {
+ struct nfp_fl_ct_flow_entry *pre_ct_entry;
+ int i;
+
+ pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
+ for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++)
+ ct_entry->prev_m_entries[i] = pre_ct_entry->prev_m_entries[i];
+ ct_entry->prev_m_entries[i++] = m_entry;
+ ct_entry->num_prev_m_entries = i;
+
+ m_entry->next_pre_ct_entry = ct_entry;
+ }
+
list_add(&ct_entry->list_node, &zt->pre_ct_list);
zt->pre_ct_count++;
@@ -1779,6 +1866,7 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
struct nfp_fl_ct_zone_entry *zt;
bool wildcarded = false;
struct flow_match_ct ct;
+ struct flow_action_entry *ct_goto;
flow_rule_match_ct(rule, &ct);
if (!ct.mask->ct_zone) {
@@ -1803,6 +1891,8 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
ct_entry->type = CT_TYPE_POST_CT;
ct_entry->chain_index = flow->common.chain_index;
+ ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
+ ct_entry->goto_chain_index = ct_goto ? ct_goto->chain_index : 0;
list_add(&ct_entry->list_node, &zt->post_ct_list);
zt->post_ct_count++;
@@ -1831,6 +1921,28 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
return 0;
}
+int nfp_fl_create_new_pre_ct(struct nfp_fl_nft_tc_merge *m_entry)
+{
+ struct nfp_fl_ct_flow_entry *pre_ct_entry, *post_ct_entry;
+ struct flow_cls_offload new_pre_ct_flow;
+ int err;
+
+ pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
+ if (pre_ct_entry->num_prev_m_entries >= NFP_MAX_RECIRC_CT_ZONES - 1)
+ return -EOPNOTSUPP;
+
+ post_ct_entry = m_entry->tc_m_parent->post_ct_parent;
+ memset(&new_pre_ct_flow, 0, sizeof(struct flow_cls_offload));
+ new_pre_ct_flow.rule = post_ct_entry->rule;
+ new_pre_ct_flow.common.chain_index = post_ct_entry->chain_index;
+
+ err = nfp_fl_ct_handle_pre_ct(pre_ct_entry->zt->priv,
+ pre_ct_entry->netdev,
+ &new_pre_ct_flow, NULL,
+ m_entry);
+ return err;
+}
+
static void
nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
enum ct_entry_type type, u64 *m_pkts,
@@ -1876,6 +1988,32 @@ nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
0, priv->stats[ctx_id].used,
FLOW_ACTION_HW_STATS_DELAYED);
}
+
+ /* Update previous pre_ct/post_ct/nft flow stats */
+ if (nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries > 0) {
+ struct nfp_fl_nft_tc_merge *tmp_nft_merge;
+ int i;
+
+ for (i = 0; i < nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries; i++) {
+ tmp_nft_merge = nft_merge->tc_m_parent->pre_ct_parent->prev_m_entries[i];
+ flow_stats_update(&tmp_nft_merge->tc_m_parent->pre_ct_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ flow_stats_update(&tmp_nft_merge->tc_m_parent->post_ct_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ flow_stats_update(&tmp_nft_merge->nft_parent->stats,
+ priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ 0, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ }
+ }
+
/* Reset stats from the nfp */
priv->stats[ctx_id].pkts = 0;
priv->stats[ctx_id].bytes = 0;
@@ -2080,10 +2218,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
switch (ct_entry->type) {
case CT_TYPE_PRE_CT:
zt->pre_ct_count--;
- rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
- nfp_ct_map_params);
+ if (ct_map_ent->cookie > 0)
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
nfp_fl_ct_clean_flow_entry(ct_entry);
- kfree(ct_map_ent);
+ if (ct_map_ent->cookie > 0)
+ kfree(ct_map_ent);
if (!zt->pre_ct_count) {
zt->nft = NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
index 762c0b36e269..c4ec78358033 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -86,6 +86,9 @@ enum ct_entry_type {
_CT_TYPE_MAX,
};
+#define NFP_MAX_RECIRC_CT_ZONES 4
+#define NFP_MAX_ENTRY_RULES (NFP_MAX_RECIRC_CT_ZONES * 2 + 1)
+
enum nfp_nfp_layer_name {
FLOW_PAY_META_TCI = 0,
FLOW_PAY_INPORT,
@@ -112,27 +115,33 @@ enum nfp_nfp_layer_name {
* @cookie: Flow cookie, same as original TC flow, used as key
* @list_node: Used by the list
* @chain_index: Chain index of the original flow
+ * @goto_chain_index: goto chain index of the flow
* @netdev: netdev structure.
- * @type: Type of pre-entry from enum ct_entry_type
* @zt: Reference to the zone table this belongs to
* @children: List of tc_merge flows this flow forms part of
* @rule: Reference to the original TC flow rule
* @stats: Used to cache stats for updating
+ * @prev_m_entries: Array of all previous nft_tc_merge entries
+ * @num_prev_m_entries: The number of all previous nft_tc_merge entries
* @tun_offset: Used to indicate tunnel action offset in action list
* @flags: Used to indicate flow flag like NAT which used by merge.
+ * @type: Type of ct-entry from enum ct_entry_type
*/
struct nfp_fl_ct_flow_entry {
unsigned long cookie;
struct list_head list_node;
u32 chain_index;
- enum ct_entry_type type;
+ u32 goto_chain_index;
struct net_device *netdev;
struct nfp_fl_ct_zone_entry *zt;
struct list_head children;
struct flow_rule *rule;
struct flow_stats stats;
+ struct nfp_fl_nft_tc_merge *prev_m_entries[NFP_MAX_RECIRC_CT_ZONES - 1];
+ u8 num_prev_m_entries;
u8 tun_offset; // Set to NFP_FL_CT_NO_TUN if no tun
u8 flags;
+ u8 type;
};
/**
@@ -169,6 +178,7 @@ struct nfp_fl_ct_tc_merge {
* @nft_parent: The nft_entry parent
* @tc_flower_cookie: The cookie of the flow offloaded to the nfp
* @flow_pay: Reference to the offloaded flow struct
+ * @next_pre_ct_entry: Reference to the next ct zone pre ct entry
*/
struct nfp_fl_nft_tc_merge {
struct net_device *netdev;
@@ -181,6 +191,7 @@ struct nfp_fl_nft_tc_merge {
struct nfp_fl_ct_flow_entry *nft_parent;
unsigned long tc_flower_cookie;
struct nfp_fl_payload *flow_pay;
+ struct nfp_fl_ct_flow_entry *next_pre_ct_entry;
};
/**
@@ -204,6 +215,7 @@ bool is_post_ct_flow(struct flow_cls_offload *flow);
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
* @extack: Extack pointer for errors
+ * @m_entry: previous nfp_fl_nft_tc_merge entry
*
* Adds a new entry to the relevant zone table and tries to
* merge with other +trk+est entries and offload if possible.
@@ -213,7 +225,8 @@ bool is_post_ct_flow(struct flow_cls_offload *flow);
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
struct net_device *netdev,
struct flow_cls_offload *flow,
- struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack,
+ struct nfp_fl_nft_tc_merge *m_entry);
/**
* nfp_fl_ct_handle_post_ct() - Handles +trk+est conntrack rules
* @priv: Pointer to app priv
@@ -232,6 +245,19 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
struct netlink_ext_ack *extack);
/**
+ * nfp_fl_create_new_pre_ct() - create next ct_zone -trk conntrack rules
+ * @m_entry: previous nfp_fl_nft_tc_merge entry
+ *
+ * Create a new pre_ct entry from the previous nfp_fl_nft_tc_merge entry
+ * in the next relevant zone table. Try to merge it with other +trk+est
+ * entries and offload if possible. The new pre_ct entry is linked to
+ * the previous nfp_fl_nft_tc_merge entry.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+int nfp_fl_create_new_pre_ct(struct nfp_fl_nft_tc_merge *m_entry);
+
+/**
* nfp_fl_ct_clean_flow_entry() - Free a nfp_fl_ct_flow_entry
* @entry: Flow entry to cleanup
*/
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 8593cafa6368..18328eb7f5c3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1344,7 +1344,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
port = nfp_port_from_netdev(netdev);
if (is_pre_ct_flow(flow))
- return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
+ return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack, NULL);
if (is_post_ct_flow(flow))
return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 56e02cba0b8a..0fd156286d4d 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1422,7 +1422,7 @@ static struct platform_driver nixge_driver = {
.remove = nixge_remove,
.driver = {
.name = "nixge",
- .of_match_table = of_match_ptr(nixge_dt_ids),
+ .of_match_table = nixge_dt_ids,
},
};
module_platform_driver(nixge_driver);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index aaab590ef548..ed7dd0a04235 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1423,7 +1423,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
}
-static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct pasemi_mac * const mac = netdev_priv(dev);
struct pasemi_mac_txring * const txring = tx_ring(mac);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index e508f8eb43bf..b8678da1cce5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -392,7 +392,6 @@ static void ionic_remove(struct pci_dev *pdev)
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
- pci_clear_master(pdev);
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index f13fa7396aef..3d36d23df0c6 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -854,7 +854,7 @@ typedef struct {
The following is packed:
- N cardrsp_rds_rings
- N cardrsp_sds_rings */
- char data[0];
+ char data[];
} nx_cardrsp_rx_ctx_t;
#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index de8d54b23f73..59d0dd862fd1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -18,7 +18,6 @@
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
-#include <linux/aer.h>
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -1464,9 +1463,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
goto err_out_disable_pdev;
- if (NX_IS_REVISION_P3(pdev->revision))
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1603,8 +1599,6 @@ err_out_free_netdev:
free_netdev(netdev);
err_out_free_res:
- if (NX_IS_REVISION_P3(pdev->revision))
- pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
@@ -1659,10 +1653,8 @@ static void netxen_nic_remove(struct pci_dev *pdev)
netxen_release_firmware(adapter);
- if (NX_IS_REVISION_P3(pdev->revision)) {
+ if (NX_IS_REVISION_P3(pdev->revision))
netxen_cleanup_minidump(adapter);
- pci_disable_pcie_error_reporting(pdev);
- }
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c91898be7c03..f5af83342856 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -23,7 +23,6 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
-#include <linux/aer.h>
#include <linux/phylink.h>
#include "qed.h"
@@ -259,8 +258,6 @@ static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
- pci_disable_pcie_error_reporting(pdev);
-
if (cdev->doorbells && cdev->db_size)
iounmap(cdev->doorbells);
if (cdev->regview)
@@ -366,12 +363,6 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
return -ENOMEM;
}
- /* AER (Advanced Error reporting) configuration */
- rc = pci_enable_pcie_error_reporting(pdev);
- if (rc)
- DP_VERBOSE(cdev, NETIF_MSG_DRV,
- "Failed to configure PCIe AER [%d]\n", rc);
-
return 0;
err2:
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f90dcfe9ee68..f9931ecb7baa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -6,8 +6,6 @@
#ifndef _QEDE_H_
#define _QEDE_H_
-#include <linux/compiler.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8034d812d5a0..374a86b875a3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -4,7 +4,6 @@
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 261f982ca40d..4c6c685820e3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -35,7 +35,6 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
-#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 2fd5c6fdb500..bcef8ab715bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -8,7 +8,6 @@
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
-#include <linux/aer.h>
#include "qlcnic.h"
#include "qlcnic_sriov.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 44dac3c0908e..90df4a0909fa 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -12,7 +12,6 @@
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
-#include <linux/aer.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <net/vxlan.h>
@@ -2445,7 +2444,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
pci_set_master(pdev);
- pci_enable_pcie_error_reporting(pdev);
ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
if (!ahw) {
@@ -2675,7 +2673,6 @@ err_out_free_hw_res:
kfree(ahw);
err_out_free_res:
- pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_out_disable_pdev:
@@ -2757,7 +2754,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_release_firmware(adapter);
- pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 5c2edb715d3e..74125188beb8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -12,7 +12,6 @@
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
-#include <linux/aer.h>
#include <linux/log2.h>
#ifdef CONFIG_QLCNIC_HWMON
#include <linux/hwmon.h>
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 45147a1016be..9f8357bbc8a4 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -613,8 +613,13 @@ struct rtl8169_private {
struct work_struct work;
} wk;
+ spinlock_t config25_lock;
+ spinlock_t mac_ocp_lock;
+
+ spinlock_t cfg9346_usage_lock;
+ int cfg9346_usage_count;
+
unsigned supports_gmii:1;
- unsigned aspm_manageable:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -661,12 +666,22 @@ static inline struct device *tp_to_dev(struct rtl8169_private *tp)
static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ unsigned long flags;
+
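+ /* Lock/unlock calls may nest; re-lock the registers only when the
+ * last user is done.
+ */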
+ spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+ if (!--tp->cfg9346_usage_count)
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
}
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
- RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+ if (!tp->cfg9346_usage_count++)
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
}
static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -675,6 +690,28 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
RTL_R8(tp, ChipCmd);
}
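+/* Config2 and Config5 read-modify-write helpers, serialized by
+ * config25_lock since callers may run concurrently.
+ */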
+static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
+{
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&tp->config25_lock, flags);
+ val = RTL_R8(tp, Config2);
+ RTL_W8(tp, Config2, (val & ~clear) | set);
+ spin_unlock_irqrestore(&tp->config25_lock, flags);
+}
+
+static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
+{
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&tp->config25_lock, flags);
+ val = RTL_R8(tp, Config5);
+ RTL_W8(tp, Config5, (val & ~clear) | set);
+ spin_unlock_irqrestore(&tp->config25_lock, flags);
+}
+
static bool rtl_is_8125(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_61;
@@ -847,7 +884,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
}
-static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+static void __r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
if (rtl_ocp_reg_failure(reg))
return;
@@ -855,7 +892,16 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
-static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ __r8168_mac_ocp_write(tp, reg, data);
+ spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+}
+
+static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
if (rtl_ocp_reg_failure(reg))
return 0;
@@ -865,12 +911,28 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
return RTL_R32(tp, OCPDR);
}
+static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ unsigned long flags;
+ u16 val;
+
+ spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ val = __r8168_mac_ocp_read(tp, reg);
+ spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+
+ return val;
+}
+
static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
u16 set)
{
- u16 data = r8168_mac_ocp_read(tp, reg);
+ unsigned long flags;
+ u16 data;
- r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
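+ /* Hold the lock across read and write so the modify is atomic */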
+ spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ data = __r8168_mac_ocp_read(tp, reg);
+ __r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
+ spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
}
/* Work around a hw issue with RTL8168g PHY, the quirk disables
@@ -1336,6 +1398,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{ WAKE_MAGIC, Config3, MagicPacket }
};
unsigned int i, tmp = ARRAY_SIZE(cfg);
+ unsigned long flags;
u8 options;
rtl_unlock_config_regs(tp);
@@ -1354,12 +1417,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
}
+ spin_lock_irqsave(&tp->config25_lock, flags);
for (i = 0; i < tmp; i++) {
options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(tp, cfg[i].reg, options);
}
+ spin_unlock_irqrestore(&tp->config25_lock, flags);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
@@ -1371,10 +1436,10 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
- options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
- options |= PME_SIGNAL;
- RTL_W8(tp, Config2, options);
+ rtl_mod_config2(tp, 0, PME_SIGNAL);
+ else
+ rtl_mod_config2(tp, PME_SIGNAL, 0);
break;
default:
break;
@@ -2675,10 +2740,12 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
- /* Don't enable ASPM in the chip if OS can't control ASPM */
- if (enable && tp->aspm_manageable) {
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
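+ /* Chip versions before VER_32 need no ASPM configuration here */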
+ if (tp->mac_version < RTL_GIGA_MAC_VER_32)
+ return;
+
+ if (enable) {
+ rtl_mod_config5(tp, 0, ASPM_en);
+ rtl_mod_config2(tp, 0, ClkReqEn);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
@@ -2701,11 +2768,9 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
break;
}
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
+ rtl_mod_config2(tp, ClkReqEn, 0);
+ rtl_mod_config5(tp, ASPM_en, 0);
}
-
- udelay(10);
}
static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
@@ -2863,7 +2928,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+ rtl_mod_config5(tp, Spi_en, 0);
}
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
@@ -2896,9 +2961,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
+ rtl_mod_config5(tp, Spi_en, 0);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -2919,7 +2982,7 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
- RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
+ rtl_mod_config5(tp, Spi_en, 0);
rtl8168_config_eee_mac(tp);
}
@@ -2989,11 +3052,7 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
};
rtl_hw_start_8168g(tp);
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_1);
- rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -3011,9 +3070,6 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
};
rtl_hw_start_8168g(tp);
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168g_2);
}
@@ -3034,8 +3090,6 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2);
/* The following Realtek-provided magic fixes an issue with the RX unit
@@ -3173,8 +3227,6 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
@@ -3189,8 +3241,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
};
int rg_saw_cnt;
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1);
rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
@@ -3238,8 +3288,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
r8168_mac_ocp_write(tp, 0xc094, 0x0000);
r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
@@ -3278,8 +3326,6 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{ 0x1e, 0x0000, 0x2000 },
};
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168ep_3);
rtl_hw_start_8168ep(tp);
@@ -3290,8 +3336,6 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8117(struct rtl8169_private *tp)
@@ -3303,9 +3347,6 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
int rg_saw_cnt;
rtl8168ep_stop_cmac(tp);
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8117);
rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
@@ -3355,8 +3396,6 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
/* firmware is for MAC only */
r8169_apply_firmware(tp);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
@@ -3479,8 +3518,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
- rtl_hw_aspm_clkreq_enable(tp, false);
-
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -3497,7 +3534,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
rtl_pcie_state_l2l3_disable(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
}
DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
@@ -3585,13 +3621,8 @@ static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
};
rtl_set_def_aspm_entry_latency(tp);
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8125a_2);
-
rtl_hw_start_8125_common(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8125b(struct rtl8169_private *tp)
@@ -3606,12 +3637,8 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
};
rtl_set_def_aspm_entry_latency(tp);
- rtl_hw_aspm_clkreq_enable(tp, false);
-
rtl_ephy_init(tp, e_info_8125b);
rtl_hw_start_8125_common(tp);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_config(struct rtl8169_private *tp)
@@ -3707,7 +3734,8 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
static void rtl_hw_start(struct rtl8169_private *tp)
{
rtl_unlock_config_regs(tp);
-
+ /* disable aspm and clock request before ephy access */
+ rtl_hw_aspm_clkreq_enable(tp, false);
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
@@ -3718,6 +3746,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_hw_start_8168(tp);
rtl_enable_exit_l1(tp);
+ rtl_hw_aspm_clkreq_enable(tp, true);
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
@@ -4510,6 +4539,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
if (napi_schedule_prep(&tp->napi)) {
+ rtl_unlock_config_regs(tp);
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_lock_config_regs(tp);
+
rtl_irq_disable(tp);
__napi_schedule(&tp->napi);
}
@@ -4569,9 +4602,14 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
work_done = rtl_rx(dev, tp, budget);
- if (work_done < budget && napi_complete_done(napi, work_done))
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
rtl_irq_enable(tp);
+ rtl_unlock_config_regs(tp);
+ rtl_hw_aspm_clkreq_enable(tp, true);
+ rtl_lock_config_regs(tp);
+ }
+
return work_done;
}
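/* Editor's sketch (not part of the patch): the interrupt and poll hunks
 * above repeat the same unlock/toggle/lock sequence, keeping ASPM
 * enabled only while NAPI is idle: off in the hard IRQ before
 * scheduling, back on once the poll completes.  The helper name below
 * is hypothetical; the three calls are the ones shown in the diff.
 */
static void rtl_hw_aspm_while_idle(struct rtl8169_private *tp, bool idle)
{
	rtl_unlock_config_regs(tp);		/* Config regs are write-protected */
	rtl_hw_aspm_clkreq_enable(tp, idle);
	rtl_lock_config_regs(tp);
}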
@@ -5145,16 +5183,6 @@ done:
rtl_rar_set(tp, mac_addr);
}
-/* register is set if system vendor successfully tested ASPM 1.2 */
-static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
-{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
- r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
- return true;
-
- return false;
-}
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5176,6 +5204,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
+ spin_lock_init(&tp->cfg9346_usage_lock);
+ spin_lock_init(&tp->config25_lock);
+ spin_lock_init(&tp->mac_ocp_lock);
+
dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
struct pcpu_sw_netstats);
if (!dev->tstats)
@@ -5222,19 +5254,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version = chipset;
- /* Disable ASPM L1 as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users.
- * Chips from RTL8168h partially have issues with L1.2, but seem
- * to work fine with L1 and L1.1.
- */
- if (rtl_aspm_is_safe(tp))
- rc = 0;
- else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
- else
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
- tp->aspm_manageable = !rc;
-
tp->dash_type = rtl_check_dash(tp);
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 894e2690c643..4d6b3b7d6abb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -28,7 +28,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/sys_soc.h>
#include <linux/reset.h>
#include <linux/math64.h>
@@ -1390,11 +1389,6 @@ static void ravb_adjust_link(struct net_device *ndev)
phy_print_status(phydev);
}
-static const struct soc_device_attribute r8a7795es10[] = {
- { .soc_id = "r8a7795", .revision = "ES1.0", },
- { /* sentinel */ }
-};
-
/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
@@ -1434,15 +1428,6 @@ static int ravb_phy_init(struct net_device *ndev)
goto err_deregister_fixed_link;
}
- /* This driver only support 10/100Mbit speeds on R-Car H3 ES1.0
- * at this time.
- */
- if (soc_device_match(r8a7795es10)) {
- phy_set_max_speed(phydev, SPEED_100);
-
- netdev_info(ndev, "limited PHY to 100Mbit/s\n");
- }
-
if (!info->half_duplex) {
/* 10BASE, Pause and Asym Pause is not supported */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index c4f93d24c6a4..29afaddb598d 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1324,10 +1324,8 @@ out:
static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
- if (rdev->ndev->phydev) {
+ if (rdev->ndev->phydev)
phy_disconnect(rdev->ndev->phydev);
- rdev->ndev->phydev = NULL;
- }
}
static int rswitch_serdes_set_params(struct rswitch_device *rdev)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d8ec729825be..2d9787231099 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2441,8 +2441,6 @@ static int sh_eth_open(struct net_device *ndev)
netif_start_queue(ndev);
- mdp->is_opened = 1;
-
return ret;
out_free_irq:
@@ -2565,7 +2563,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
if (mdp->cd->no_tx_cntrs)
return &ndev->stats;
- if (!mdp->is_opened)
+ if (!netif_running(ndev))
return &ndev->stats;
sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
@@ -2614,8 +2612,6 @@ static int sh_eth_close(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- mdp->is_opened = 0;
-
pm_runtime_put(&mdp->pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a5c07c6ff44a..f56dbc8a064a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -560,7 +560,6 @@ struct sh_eth_private {
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
- unsigned is_opened:1;
unsigned wol_enabled:1;
};
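/* Editor's sketch (not part of the patch): with the private is_opened
 * bit gone, the core netif_running() test supplies the same "interface
 * is up" information with no driver-side bookkeeping.  Hypothetical
 * shape of the resulting stats path:
 */
#include <linux/netdevice.h>

static struct net_device_stats *sh_eth_stats_sketch(struct net_device *ndev)
{
	if (!netif_running(ndev))	/* counters only readable while up */
		return &ndev->stats;
	/* ... fold hardware counters into ndev->stats here ... */
	return &ndev->stats;
}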
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 926532466691..4e5526303f07 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -229,7 +229,7 @@ static struct platform_driver sxgbe_platform_driver = {
.driver = {
.name = SXGBE_RESOURCE_NAME,
.pm = &sxgbe_platform_pm_ops,
- .of_match_table = of_match_ptr(sxgbe_dt_ids),
+ .of_match_table = sxgbe_dt_ids,
},
};
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index 71aab3d0480f..6334992b0af4 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -11,7 +11,6 @@
#include "net_driver.h"
#include <linux/module.h>
-#include <linux/aer.h>
#include "efx_common.h"
#include "efx_channels.h"
#include "io.h"
@@ -440,8 +439,6 @@ static void ef100_pci_remove(struct pci_dev *pci_dev)
pci_dbg(pci_dev, "shutdown successful\n");
- pci_disable_pcie_error_reporting(pci_dev);
-
pci_set_drvdata(pci_dev, NULL);
efx_fini_struct(efx);
kfree(probe_data);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 02c2adeb0a12..92c390ec4735 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -18,7 +18,6 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
-#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
@@ -892,8 +891,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
free_netdev(efx->net_dev);
probe_data = container_of(efx, struct efx_probe_data, efx);
kfree(probe_data);
-
- pci_disable_pcie_error_reporting(pci_dev);
};
/* NIC VPD information
@@ -1126,8 +1123,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
- (void)pci_enable_pcie_error_reporting(pci_dev);
-
if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index e151b0957751..e001f27085c6 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -17,7 +17,6 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
-#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
@@ -2765,8 +2764,6 @@ static void ef4_pci_remove(struct pci_dev *pci_dev)
ef4_fini_struct(efx);
free_netdev(efx->net_dev);
-
- pci_disable_pcie_error_reporting(pci_dev);
};
/* NIC VPD information
@@ -2927,12 +2924,6 @@ static int ef4_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
- rc = pci_enable_pcie_error_reporting(pci_dev);
- if (rc && rc != -EINVAL)
- netif_notice(efx, probe, efx->net_dev,
- "PCIE error reporting unavailable (%d).\n",
- rc);
-
return 0;
fail4:
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 2d32abe5f478..c53d354c1fb2 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -682,6 +682,10 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
size_t outlen;
int rc;
+ MCDI_POPULATE_DWORD_2(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+ MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push,
+ MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop);
+
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
@@ -694,6 +698,18 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL);
+ if (act->vlan_push) {
+ MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE,
+ act->vlan_tci[0]);
+ MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN0_PROTO_BE,
+ act->vlan_proto[0]);
+ }
+ if (act->vlan_push >= 2) {
+ MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN1_TCI_BE,
+ act->vlan_tci[1]);
+ MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN1_PROTO_BE,
+ act->vlan_proto[1]);
+ }
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_ENCAP_HEADER_ID,
MC_CMD_MAE_ENCAP_HEADER_ALLOC_OUT_ENCAP_HEADER_ID_NULL);
if (act->deliver)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index b139b76febff..454e9d51a4c2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -233,6 +233,11 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 2), \
le16_to_cpu(*(__force const __le16 *)MCDI_STRUCT_PTR(_buf, _field)))
/* Write a 16-bit field defined in the protocol as being big-endian. */
+#define MCDI_SET_WORD_BE(_buf, _field, _value) do { \
+ BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2); \
+ BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1); \
+ *(__force __be16 *)MCDI_PTR(_buf, _field) = (_value); \
+ } while (0)
#define MCDI_STRUCT_SET_WORD_BE(_buf, _field, _value) do { \
BUILD_BUG_ON(_field ## _LEN != 2); \
BUILD_BUG_ON(_field ## _OFST & 1); \
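/* Editor's usage sketch (not part of the patch): MCDI_SET_WORD_BE()
 * stores a value that is already big-endian, so callers convert first,
 * as tc.c does with cpu_to_be16().  The field name is reused from the
 * mae.c hunk above; the wrapper itself is hypothetical.
 */
static inline void mae_set_vlan0_tci_sketch(efx_dword_t *inbuf, u16 tci_host)
{
	__be16 tci = cpu_to_be16(tci_host);

	MCDI_SET_WORD_BE(inbuf, MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE, tci);
}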
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index ef52ec71d197..8c557f6a183c 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -18,7 +18,6 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
-#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
@@ -874,8 +873,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_siena_fini_struct(efx);
free_netdev(efx->net_dev);
-
- pci_disable_pcie_error_reporting(pci_dev);
};
/* NIC VPD information
@@ -1094,8 +1091,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
- (void)pci_enable_pcie_error_reporting(pci_dev);
-
if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx);
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index deeaab9ee761..2b07bb2fd735 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -286,6 +286,8 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */
enum efx_tc_action_order {
+ EFX_TC_AO_VLAN_POP,
+ EFX_TC_AO_VLAN_PUSH,
EFX_TC_AO_COUNT,
EFX_TC_AO_DELIVER
};
@@ -294,6 +296,20 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
enum efx_tc_action_order new)
{
switch (new) {
+ case EFX_TC_AO_VLAN_POP:
+ if (act->vlan_pop >= 2)
+ return false;
+ /* If we've already pushed a VLAN, we can't then pop it;
+ * the hardware would instead try to pop an existing VLAN
+ * before pushing the new one.
+ */
+ if (act->vlan_push)
+ return false;
+ fallthrough;
+ case EFX_TC_AO_VLAN_PUSH:
+ if (act->vlan_push >= 2)
+ return false;
+ fallthrough;
case EFX_TC_AO_COUNT:
if (act->count)
return false;
@@ -393,6 +409,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
flow_action_for_each(i, fa, &fr->action) {
struct efx_tc_action_set save;
+ u16 tci;
if (!act) {
/* more actions after a non-pipe action */
@@ -494,6 +511,31 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
}
*act = save;
break;
+ case FLOW_ACTION_VLAN_POP:
+ if (act->vlan_push) {
+ act->vlan_push--;
+ } else if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_POP)) {
+ act->vlan_pop++;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "More than two VLAN pops, or action order violated");
+ rc = -EINVAL;
+ goto release;
+ }
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_PUSH)) {
+ rc = -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack,
+ "More than two VLAN pushes, or action order violated");
+ goto release;
+ }
+ tci = fa->vlan.vid & VLAN_VID_MASK;
+ tci |= fa->vlan.prio << VLAN_PRIO_SHIFT;
+ act->vlan_tci[act->vlan_push] = cpu_to_be16(tci);
+ act->vlan_proto[act->vlan_push] = fa->vlan.proto;
+ act->vlan_push++;
+ break;
default:
NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
fa->id);
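/* Editor's sketch (not part of the patch): the fallthrough chain above
 * encodes the hardware's fixed action order pop -> push -> count ->
 * deliver; an action is accepted only while no later-stage action has
 * been recorded.  A condensed, hypothetical view of the pop case that
 * merges the order check with the push-cancellation logic:
 */
static int tc_vlan_pop_sketch(struct efx_tc_action_set *act)
{
	if (act->vlan_push) {
		act->vlan_push--;	/* pop cancels an uncommitted push */
		return 0;
	}
	if (act->vlan_pop >= 2 || act->count || act->deliver)
		return -EINVAL;		/* too many pops or order violated */
	act->vlan_pop++;
	return 0;
}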
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 418ce8c13a06..542853f60c2a 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -19,7 +19,11 @@
#define IS_ALL_ONES(v) (!(typeof (v))~(v))
struct efx_tc_action_set {
+ u16 vlan_push:2;
+ u16 vlan_pop:2;
u16 deliver:1;
+ __be16 vlan_tci[2]; /* TCIs for vlan_push */
+ __be16 vlan_proto[2]; /* Ethertypes for vlan_push */
struct efx_tc_counter_index *count;
u32 dest_mport;
u32 fw_id; /* index of this entry in firmware actions table */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 35e99bf0c401..032eccf8eb42 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -57,6 +57,7 @@ static const char version[] =
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/errno.h>
@@ -69,7 +70,6 @@ static const char version[] =
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a2e511912e6a..39446d4e94b6 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1741,7 +1741,6 @@ irq_stop_out:
free_irq(dev->irq, dev);
mii_free_out:
phy_disconnect(dev->phydev);
- dev->phydev = NULL;
out:
pm_runtime_put(dev->dev.parent);
return retval;
@@ -1772,7 +1771,6 @@ static int smsc911x_stop(struct net_device *dev)
if (dev->phydev) {
phy_stop(dev->phydev);
phy_disconnect(dev->phydev);
- dev->phydev = NULL;
}
netif_carrier_off(dev);
pm_runtime_put(dev->dev.parent);
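/* Editor's note as code (not part of the patch): phy_disconnect() ends
 * in phy_detach(), which already clears dev->phydev, so the removed
 * manual NULL assignments were redundant.  Minimal stop path:
 */
#include <linux/phy.h>

static void smsc_stop_phy_sketch(struct net_device *dev)
{
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);	/* also NULLs dev->phydev */
	}
}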
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 5e731a72cce8..ef8f3a940938 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -91,7 +91,7 @@ static struct platform_driver dwmac_generic_driver = {
.driver = {
.name = STMMAC_RESOURCE_NAME,
.pm = &stmmac_pltfr_pm_ops,
- .of_match_table = of_match_ptr(dwmac_generic_match),
+ .of_match_table = dwmac_generic_match,
},
};
module_platform_driver(dwmac_generic_driver);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 732774645c1a..32763566c214 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -606,7 +606,7 @@ static struct platform_driver qcom_ethqos_driver = {
.driver = {
.name = "qcom-ethqos",
.pm = &stmmac_pltfr_pm_ops,
- .of_match_table = of_match_ptr(qcom_ethqos_match),
+ .of_match_table = qcom_ethqos_match,
},
};
module_platform_driver(qcom_ethqos_driver);
diff --git a/drivers/net/ethernet/sunplus/spl2sw_phy.c b/drivers/net/ethernet/sunplus/spl2sw_phy.c
index 404f508a54d4..6f899e48f51d 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_phy.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_phy.c
@@ -84,9 +84,7 @@ void spl2sw_phy_remove(struct spl2sw_common *comm)
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
ndev = comm->ndev[i];
- if (ndev) {
+ if (ndev)
phy_disconnect(ndev->phydev);
- ndev->phydev = NULL;
- }
}
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 4e3861c47708..9ddb79776c88 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -76,6 +76,7 @@
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
#define AM65_CPSW_SGMII_CONTROL_REG 0x010
+#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG 0x018
#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
@@ -1466,15 +1467,13 @@ static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
struct am65_cpsw_port *port)
{
- const char *name = "serdes-phy";
+ const char *name = "serdes";
struct phy *phy;
int ret;
- phy = devm_of_phy_get(dev, port_np, name);
- if (PTR_ERR(phy) == -ENODEV)
- return 0;
- if (IS_ERR(phy))
- return PTR_ERR(phy);
+ phy = devm_of_phy_optional_get(dev, port_np, name);
+ if (IS_ERR_OR_NULL(phy))
+ return PTR_ERR_OR_ZERO(phy);
/* Serdes PHY exists. Store it. */
port->slave.serdes_phy = phy;
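/* Editor's sketch (not part of the patch): devm_of_phy_optional_get()
 * returns NULL when no "serdes" phandle exists, so a single
 * IS_ERR_OR_NULL()/PTR_ERR_OR_ZERO() pair covers "absent" (returns 0)
 * and real errors alike.  Hypothetical standalone form:
 */
#include <linux/phy/phy.h>

static int serdes_phy_get_sketch(struct device *dev,
				 struct device_node *port_np,
				 struct phy **serdes_phy)
{
	struct phy *phy = devm_of_phy_optional_get(dev, port_np, "serdes");

	if (IS_ERR_OR_NULL(phy))
		return PTR_ERR_OR_ZERO(phy);	/* 0 when simply not wired */

	*serdes_phy = phy;
	return 0;
}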
@@ -1498,9 +1497,14 @@ static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned in
struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
struct am65_cpsw_common *common = port->common;
- if (common->pdata.extra_modes & BIT(state->interface))
+ if (common->pdata.extra_modes & BIT(state->interface)) {
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ writel(ADVERTISE_SGMII,
+ port->sgmii_base + AM65_CPSW_SGMII_MR_ADV_ABILITY_REG);
+
writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
+ }
}
static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
@@ -1541,6 +1545,8 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
if (speed == SPEED_1000)
mac_control |= CPSW_SL_CTL_GIG;
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ mac_control |= CPSW_SL_CTL_EXT_EN;
if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
/* Can be used with in band mode only */
mac_control |= CPSW_SL_CTL_EXT_EN;
@@ -2145,15 +2151,31 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
port->slave.phylink_config.mac_managed_pm = true; /* MAC does PM */
- if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
+ switch (port->slave.phy_if) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
- } else if (port->slave.phy_if == PHY_INTERFACE_MODE_RMII) {
+ break;
+
+ case PHY_INTERFACE_MODE_RMII:
__set_bit(PHY_INTERFACE_MODE_RMII,
port->slave.phylink_config.supported_interfaces);
- } else if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
- __set_bit(PHY_INTERFACE_MODE_QSGMII,
- port->slave.phylink_config.supported_interfaces);
- } else {
+ break;
+
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
+ __set_bit(port->slave.phy_if,
+ port->slave.phylink_config.supported_interfaces);
+ } else {
+ dev_err(dev, "selected phy-mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
dev_err(dev, "selected phy-mode is not supported\n");
return -EOPNOTSUPP;
}
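/* Editor's sketch (not part of the patch): extra_modes is a bitmap
 * indexed by phy_interface_t, letting per-SoC data opt ports into
 * QSGMII or SGMII without adding per-mode flags.  A hypothetical
 * validity check mirroring the switch above:
 */
static bool am65_mode_ok_sketch(const struct am65_cpsw_pdata *pdata,
				phy_interface_t mode)
{
	return phy_interface_mode_is_rgmii(mode) ||
	       mode == PHY_INTERFACE_MODE_RMII ||
	       (pdata->extra_modes & BIT(mode));
}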
@@ -2755,14 +2777,14 @@ static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
};
static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 8caf85acbb6a..c66618d91c28 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -175,6 +175,7 @@ struct am65_cpts {
u64 timestamp;
u32 genf_enable;
u32 hw_ts_enable;
+ u32 estf_enable;
struct sk_buff_head txq;
bool pps_enabled;
bool pps_present;
@@ -405,13 +406,13 @@ static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
- u32 pps_ctrl_val = 0, pps_ppm_hi = 0, pps_ppm_low = 0;
+ u32 estf_ctrl_val = 0, estf_ppm_hi = 0, estf_ppm_low = 0;
s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
int pps_index = cpts->pps_genf_idx;
u64 adj_period, pps_adj_period;
u32 ctrl_val, ppm_hi, ppm_low;
unsigned long flags;
- int neg_adj = 0;
+ int neg_adj = 0, i;
if (ppb < 0) {
neg_adj = 1;
@@ -441,19 +442,19 @@ static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
ppm_low = lower_32_bits(adj_period);
if (cpts->pps_enabled) {
- pps_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
+ estf_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
if (neg_adj)
- pps_ctrl_val &= ~BIT(1);
+ estf_ctrl_val &= ~BIT(1);
else
- pps_ctrl_val |= BIT(1);
+ estf_ctrl_val |= BIT(1);
/* GenF PPM will do correction using cpts refclk tick which is
* (cpts->ts_add_val + 1) ns, so GenF length PPM adj period
* need to be corrected.
*/
pps_adj_period = adj_period * (cpts->ts_add_val + 1);
- pps_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
- pps_ppm_low = lower_32_bits(pps_adj_period);
+ estf_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
+ estf_ppm_low = lower_32_bits(pps_adj_period);
}
spin_lock_irqsave(&cpts->lock, flags);
@@ -471,11 +472,18 @@ static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
am65_cpts_write32(cpts, ppm_low, ts_ppm_low);
if (cpts->pps_enabled) {
- am65_cpts_write32(cpts, pps_ctrl_val, genf[pps_index].control);
- am65_cpts_write32(cpts, pps_ppm_hi, genf[pps_index].ppm_hi);
- am65_cpts_write32(cpts, pps_ppm_low, genf[pps_index].ppm_low);
+ am65_cpts_write32(cpts, estf_ctrl_val, genf[pps_index].control);
+ am65_cpts_write32(cpts, estf_ppm_hi, genf[pps_index].ppm_hi);
+ am65_cpts_write32(cpts, estf_ppm_low, genf[pps_index].ppm_low);
}
+ for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
+ if (cpts->estf_enable & BIT(i)) {
+ am65_cpts_write32(cpts, estf_ctrl_val, estf[i].control);
+ am65_cpts_write32(cpts, estf_ppm_hi, estf[i].ppm_hi);
+ am65_cpts_write32(cpts, estf_ppm_low, estf[i].ppm_low);
+ }
+ }
/* All GenF/EstF can be updated here the same way */
spin_unlock_irqrestore(&cpts->lock, flags);
@@ -596,6 +604,11 @@ int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
am65_cpts_write32(cpts, val, estf[idx].comp_lo);
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, estf[idx].length);
+ am65_cpts_write32(cpts, 0, estf[idx].control);
+ am65_cpts_write32(cpts, 0, estf[idx].ppm_hi);
+ am65_cpts_write32(cpts, 0, estf[idx].ppm_low);
+
+ cpts->estf_enable |= BIT(idx);
dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
@@ -606,6 +619,7 @@ EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
am65_cpts_write32(cpts, 0, estf[idx].length);
+ cpts->estf_enable &= ~BIT(idx);
dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
}
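/* Editor's sketch (not part of the patch): estf_enable mirrors which
 * EST function generators are live, set on enable and cleared on
 * disable, so am65_cpts_ptp_adjfine() can replay the PPM correction to
 * every active generator.  Hypothetical bookkeeping helper:
 */
#include <linux/bits.h>

static inline void cpts_estf_mark_sketch(u32 *estf_enable, int idx, bool on)
{
	if (on)
		*estf_enable |= BIT(idx);
	else
		*estf_enable &= ~BIT(idx);
}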
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 7db57f934a91..ca409b4054d0 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
@@ -1261,7 +1262,7 @@ static void wx_set_rx_buffer_len(struct wx *wx)
struct net_device *netdev = wx->netdev;
u32 mhadd, max_frame;
- max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
@@ -1271,6 +1272,24 @@ static void wx_set_rx_buffer_len(struct wx *wx)
wr32(wx, WX_PSR_MAX_SZ, max_frame);
}
+/**
+ * wx_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int wx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ netdev->mtu = new_mtu;
+ wx_set_rx_buffer_len(wx);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_change_mtu);
+
/* Disable the specified rx queue */
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 44dfd6ea442a..c173c56f0ab5 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -23,6 +23,7 @@ void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
+int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
void wx_configure(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 77d8d7f1707e..2bec5b1bc196 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -7,11 +7,6 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
-/* Vendor ID */
-#ifndef PCI_VENDOR_ID_WANGXUN
-#define PCI_VENDOR_ID_WANGXUN 0x8088
-#endif
-
#define WX_NCSI_SUP 0x8000
#define WX_NCSI_MASK 0x8000
#define WX_WOL_SUP 0x4000
@@ -300,6 +295,8 @@
#define WX_MAX_RXD 8192
#define WX_MAX_TXD 8192
+#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
+
/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256 256 /* Used for skb receive header */
#define WX_RXBUFFER_2K 2048
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 5b564d348c09..1a004aa2adcb 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -6,10 +6,10 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
-#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <linux/phy.h>
+#include <linux/if_vlan.h>
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
@@ -470,6 +470,7 @@ static void ngbe_shutdown(struct pci_dev *pdev)
static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open,
.ndo_stop = ngbe_close,
+ .ndo_change_mtu = wx_change_mtu,
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
@@ -520,7 +521,6 @@ static int ngbe_probe(struct pci_dev *pdev,
goto err_pci_disable_dev;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
@@ -562,7 +562,8 @@ static int ngbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+ netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
wx->bd_number = func_nums;
/* setup the private structure */
@@ -669,7 +670,6 @@ err_clear_interrupt_scheme:
err_free_mac_table:
kfree(wx->mac_table);
err_pci_release_regions:
- pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
@@ -698,7 +698,6 @@ static void ngbe_remove(struct pci_dev *pdev)
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index a2351349785e..373d5af628cd 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -137,7 +137,6 @@ enum NGBE_MSCA_CMD_value {
#define NGBE_RX_PB_SIZE 42
#define NGBE_MC_TBL_SIZE 128
#define NGBE_TDB_PB_SZ (20 * 1024) /* 160KB Packet Buffer */
-#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
/* TX/RX descriptor defines */
#define NGBE_DEFAULT_TXD 512 /* default ring size */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 6c0a98230557..843a88bc416f 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -6,9 +6,9 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
-#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
+#include <linux/if_vlan.h>
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
@@ -487,6 +487,7 @@ static void txgbe_shutdown(struct pci_dev *pdev)
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
+ .ndo_change_mtu = wx_change_mtu,
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
@@ -538,7 +539,6 @@ static int txgbe_probe(struct pci_dev *pdev,
goto err_pci_disable_dev;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
@@ -605,7 +605,8 @@ static int txgbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+ netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
/* make sure the EEPROM is good */
err = txgbe_validate_eeprom_checksum(wx, NULL);
@@ -698,7 +699,6 @@ err_release_hw:
err_free_mac_table:
kfree(wx->mac_table);
err_pci_release_regions:
- pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_disable_dev:
@@ -729,8 +729,6 @@ static void txgbe_remove(struct pci_dev *pdev)
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 563ea51deca6..63a1c733718d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -79,7 +79,6 @@
#define TXGBE_SP_MC_TBL_SIZE 128
#define TXGBE_SP_RX_PB_SIZE 512
#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
-#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
/* TX/RX descriptor defines */
#define TXGBE_DEFAULT_TXD 512