Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c16
-rw-r--r--drivers/net/ethernet/adi/adin1110.c8
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c4
-rw-r--r--drivers/net/ethernet/agere/et131x.c3
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c6
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.h1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c6
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c3
-rw-r--r--drivers/net/ethernet/atheros/alx/ethtool.c5
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig12
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c1437
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h586
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c503
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c1415
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h257
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c311
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c26
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c49
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h644
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c13
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c18
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h1
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c6
-rw-r--r--drivers/net/ethernet/cortina/gemini.c8
-rw-r--r--drivers/net/ethernet/davicom/dm9051.c9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h1
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c3
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c5
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c16
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c50
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c12
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c22
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ierb.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c8
-rw-r--r--drivers/net/ethernet/freescale/fec.h18
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c314
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c10
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c9
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c60
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c7
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h24
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c5
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c15
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c9
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c7
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c8
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c9
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c4
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve.h113
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c89
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h10
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c20
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c126
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c404
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c576
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c668
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c121
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c164
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c10
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h1
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c116
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c54
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c72
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c52
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c253
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c101
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c18
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c75
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h107
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c285
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c120
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c1346
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.h120
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c1838
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c129
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c383
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c309
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c78
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h90
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c465
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c186
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c84
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c221
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h19
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c76
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c174
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h3
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c10
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h11
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c37
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c155
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c35
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c23
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c177
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c470
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c398
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.h11
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c20
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c3
-rw-r--r--drivers/net/ethernet/marvell/sky2.c3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c36
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c694
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h376
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c56
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h22
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c363
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c237
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c773
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c176
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c1394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c360
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c369
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c325
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h129
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c665
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c418
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c449
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c2411
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/thermal.c114
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/thermal.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h166
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c200
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c626
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c193
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c65
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h15
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c38
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c24
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c6
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c4
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h3
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h3
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.c18
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.h2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c35
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c24
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c142
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c15
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c28
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c3
-rw-r--r--drivers/net/ethernet/neterion/s2io.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c64
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c57
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/ni/nixge.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c160
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c70
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c45
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c12
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c3
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c3
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c32
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c6
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h2
-rw-r--r--drivers/net/ethernet/sfc/efx.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c30
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c7
-rw-r--r--drivers/net/ethernet/sfc/farch_regs.h2929
-rw-r--r--drivers/net/ethernet/sfc/filter.h7
-rw-r--r--drivers/net/ethernet/sfc/io.h84
-rw-r--r--drivers/net/ethernet/sfc/mae.c916
-rw-r--r--drivers/net/ethernet/sfc/mae.h16
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c7
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h14
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c5
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h63
-rw-r--r--drivers/net/ethernet/sfc/nic.c158
-rw-r--r--drivers/net/ethernet/sfc/nic.h178
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h13
-rw-r--r--drivers/net/ethernet/sfc/ptp.c227
-rw-r--r--drivers/net/ethernet/sfc/selftest.c7
-rw-r--r--drivers/net/ethernet/sfc/tc.c1076
-rw-r--r--drivers/net/ethernet/sfc/tc.h144
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c533
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.h55
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c8
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.h4
-rw-r--r--drivers/net/ethernet/sfc/tx.c45
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c2
-rw-r--r--drivers/net/ethernet/sfc/vfdi.h252
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h7
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c5
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c7
-rw-r--r--drivers/net/ethernet/socionext/netsec.c2
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h78
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c65
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c245
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c53
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c123
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c47
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c417
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c6
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig25
-rw-r--r--drivers/net/ethernet/ti/Makefile11
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c1
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c965
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.h41
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c367
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c457
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h200
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c209
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c120
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_mii_rt.h151
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c2336
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h286
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_queues.c50
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c57
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h158
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h234
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c68
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h34
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c35
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c64
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c88
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h19
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c39
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c56
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c188
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
565 files changed, 33736 insertions, 13769 deletions
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 2c6bd36d2f31..65f56a98c0a0 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -731,18 +731,4 @@ static struct pci_driver ne2k_driver = {
.id_table = ne2k_pci_tbl,
.driver.pm = &ne2k_pci_pm_ops,
};
-
-
-static int __init ne2k_pci_init(void)
-{
- return pci_register_driver(&ne2k_driver);
-}
-
-
-static void __exit ne2k_pci_cleanup(void)
-{
- pci_unregister_driver(&ne2k_driver);
-}
-
-module_init(ne2k_pci_init);
-module_exit(ne2k_pci_cleanup);
+module_pci_driver(ne2k_driver);
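
The module_pci_driver() helper generates exactly the init/exit boilerplate deleted above. Roughly what it expands to, paraphrased from include/linux/pci.h and the module_driver() macro (the generated function names follow the macro's name pasting and are shown here only for illustration):

    #define module_pci_driver(__pci_driver) \
            module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)

    /* module_driver() then emits the equivalent of: */
    static int __init ne2k_driver_init(void)
    {
            return pci_register_driver(&ne2k_driver);
    }
    module_init(ne2k_driver_init);

    static void __exit ne2k_driver_exit(void)
    {
            pci_unregister_driver(&ne2k_driver);
    }
    module_exit(ne2k_driver_exit);
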
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index f5c2d7a9abc1..1c009b485188 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -739,7 +739,7 @@ static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
u32 port_rules = 0;
u8 mask[ETH_ALEN];
- memset(mask, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(mask);
if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
port_rules = adin1110_port_rules(port_priv, true, true);
@@ -760,7 +760,7 @@ static int adin1110_set_mac_address(struct net_device *netdev,
return -EADDRNOTAVAIL;
eth_hw_addr_set(netdev, dev_addr);
- memset(mask, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(mask);
mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
port_rules = adin1110_port_rules(port_priv, true, false);
@@ -1271,7 +1271,7 @@ static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv
goto out;
/* Allow only BPDUs to be passed to the CPU */
- memset(mask, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(mask);
port_rules = adin1110_port_rules(port_priv, true, false);
ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
mask, port_rules);
@@ -1386,7 +1386,7 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
other_port = priv->ports[!port_priv->nr];
port_rules = adin1110_port_rules(port_priv, false, true);
- memset(mask, 0xFF, ETH_ALEN);
+ eth_broadcast_addr(mask);
return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
mask, port_rules);
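
eth_broadcast_addr() is the etherdevice.h helper for filling a MAC address with 0xff, so the conversions above are behaviour-preserving. A sketch of the helper, paraphrased from include/linux/etherdevice.h:

    static inline void eth_broadcast_addr(u8 *addr)
    {
            memset(addr, 0xff, ETH_ALEN);   /* ETH_ALEN == 6 */
    }
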
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index aa0d2f3aaeaa..597a02c75d52 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -29,9 +29,9 @@
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 5fab589b3ddf..3d9220f9c9fe 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3982,8 +3982,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
}
adapter->mii_bus->name = "et131x_eth_mii";
- snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
- (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
+ snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(adapter->pdev));
adapter->mii_bus->priv = netdev;
adapter->mii_bus->read = et131x_mdio_read;
adapter->mii_bus->write = et131x_mdio_write;
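
pci_dev_id() packs the bus number and devfn into the same 16-bit BDF value that the open-coded shift built. A sketch, paraphrased from include/linux/pci.h:

    #define PCI_DEVID(bus, devfn)   ((((u16)(bus)) << 8) | (devfn))

    static inline u16 pci_dev_id(struct pci_dev *dev)
    {
            return PCI_DEVID(dev->bus->number, dev->devfn);
    }
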
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index a30d0f172986..78231c85234d 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1520,10 +1520,8 @@ static void slic_get_ethtool_stats(struct net_device *dev,
static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- if (stringset == ETH_SS_STATS) {
+ if (stringset == ETH_SS_STATS)
memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
- data += sizeof(slic_stats_strings);
- }
}
static void slic_get_drvinfo(struct net_device *dev,
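
The dropped `data += sizeof(...)` was dead code: the ethtool core sizes the strings buffer from get_sset_count() before calling get_strings(), and the pointer is passed by value, so advancing it in the callback had no effect. A simplified sketch of the caller side (paraphrased from net/ethtool/ioctl.c, error handling elided):

    len = ops->get_sset_count(dev, gstrings.string_set);
    data = vzalloc(array_size(len, ETH_GSTRING_LEN));
    ops->get_strings(dev, gstrings.string_set, data);   /* fills exactly len entries */
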
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index d19593fae226..ad32ca81f7ef 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3267,7 +3267,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info = ena_dev->host_attr.host_info;
- host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
+ host_info->bdf = pci_dev_id(pdev);
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
strscpy(host_info->kernel_ver_str, utsname()->version,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 248b715b4d68..33c923e1261a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <net/xdp.h>
#include <uapi/linux/bpf.h>
#include "ena_com.h"
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index ec704222925d..751454d305c6 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -367,7 +367,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len )
}
-struct net_device * __init atarilance_probe(void)
+static struct net_device * __init atarilance_probe(void)
{
int i;
static int found;
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 561af8e5b3ea..6787a5fae908 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -11,7 +11,7 @@
* @pf_pdev: ptr to the PF driver struct
* @devname: name that includes service info, e.g. pds_core.vDPA
*
- * Return: 0 on success, or
+ * Return: positive client ID (ci) on success, or
* negative for error
*/
int pds_client_register(struct pci_dev *pf_pdev, char *devname)
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 672757932246..3a45bf474a19 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -367,14 +367,13 @@ static int pdsc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pdsc_init_vf(pdsc);
if (err) {
dev_err(dev, "Cannot init device: %pe\n", ERR_PTR(err));
- goto err_out_clear_master;
+ goto err_out_disable_device;
}
clear_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
return 0;
-err_out_clear_master:
- pci_clear_master(pdev);
+err_out_disable_device:
pci_disable_device(pdev);
err_out_free_ida:
ida_free(&pdsc_ida, pdsc->uid);
@@ -439,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
}
- pci_clear_master(pdev);
pci_disable_device(pdev);
ida_free(&pdsc_ida, pdsc->uid);
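
The pci_clear_master() calls can go because pci_disable_device() already disables bus mastering when the last user releases the device, so only the enable/disable pairing needs to be unwound. A minimal sketch of the resulting ordering (a hypothetical condensation of the probe/remove flow above, not the full driver code):

    /* probe */
    err = pci_enable_device(pdev);
    if (err)
            return err;
    pci_set_master(pdev);

    /* error unwind / remove: no pci_clear_master() needed */
    pci_disable_device(pdev);       /* also clears the bus-master bit */
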
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 68ca1225eedc..33bb539ad70a 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -92,7 +92,7 @@ static char lancestr[] = "LANCE";
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
index b3985a7be59d..7be6f83e22fe 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.h
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -22,6 +22,7 @@
#include <linux/of_mdio.h>
#include <linux/prefetch.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <net/ip.h>
#include "mac.h"
#include "enet.h"
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 390671640388..4d4140b7c450 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1632,7 +1632,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
for (i = 0; i < max_irqs; i++) {
ret = platform_get_irq(pdev, i);
- if (ret <= 0) {
+ if (ret < 0) {
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
max_irqs = i;
pdata->rxq_cnt = max_irqs / 2;
@@ -1640,7 +1640,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
pdata->cq_cnt = max_irqs / 2;
break;
}
- return ret ? : -ENXIO;
+ return ret;
}
pdata->irqs[i] = ret;
}
@@ -2041,7 +2041,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
if (of_id) {
- pdata->enet_id = (enum xgene_enet_id)of_id->data;
+ pdata->enet_id = (uintptr_t)of_id->data;
}
#ifdef CONFIG_ACPI
else {
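
platform_get_irq() returns a non-zero IRQ number on success and a negative errno on failure; it never returns 0, so the `ret <= 0` check and the `ret ? : -ENXIO` mapping were redundant. The idiomatic pattern, as a minimal sketch:

    int irq = platform_get_irq(pdev, i);

    if (irq < 0)
            return irq;             /* already a negative errno */
    pdata->irqs[i] = irq;
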
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 8fcaf1639920..8775c3234e91 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -77,7 +77,7 @@ struct mace_frame {
u8 pad4;
u32 pad5;
u32 pad6;
- u8 data[1];
+ DECLARE_FLEX_ARRAY(u8, data);
/* And frame continues.. */
};
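
DECLARE_FLEX_ARRAY() allows a flexible array member where a bare `u8 data[];` would be rejected (for example as the only member of a struct, or inside a union), replacing the old one-element-array idiom without changing the layout. Roughly what it expands to, paraphrased from include/uapi/linux/stddef.h:

    #define __DECLARE_FLEX_ARRAY(TYPE, NAME)        \
            struct {                                \
                    struct { } __empty_ ## NAME;    \
                    TYPE NAME[];                    \
            }
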
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 5dfc751572ed..220400a633f5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -93,7 +93,7 @@ static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
static int hw_atl2_hw_reset(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
int err;
err = hw_atl2_utils_soft_reset(self);
@@ -378,8 +378,8 @@ static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
+ struct hw_atl2_priv *priv = self->priv;
u16 action;
u8 index;
int i;
@@ -433,7 +433,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
u16 off_action = (!promisc &&
!hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
u8 index;
index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
@@ -445,7 +445,7 @@ static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
{
u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
bool vlan_promisc_enable;
u8 index;
@@ -539,8 +539,8 @@ static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
+ struct hw_atl2_priv *priv = self->priv;
u8 base_index, count;
int err;
@@ -770,7 +770,7 @@ static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
struct aq_rx_filter_vlan *aq_vlans)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
u32 queue;
u8 index;
int i;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 674683b54304..52e2070a4a2f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -413,8 +413,8 @@ do { \
static int aq_a2_fw_update_stats(struct aq_hw_s *self)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
struct aq_stats_s *cs = &self->curr_stats;
+ struct hw_atl2_priv *priv = self->priv;
struct statistics_s stats;
struct version_s version;
int err;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 2b427d8a1831..31ee477dd131 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -15,11 +15,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index ff1a5edf8df1..009e0b3066fa 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -29,9 +29,10 @@
#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
index b716adacd815..7f6b69a52367 100644
--- a/drivers/net/ethernet/atheros/alx/ethtool.c
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -292,9 +292,8 @@ static void alx_get_ethtool_stats(struct net_device *netdev,
spin_lock(&alx->stats_lock);
alx_update_hw_stats(hw);
- BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) <
- ALX_NUM_STATS * sizeof(u64));
- memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64));
+ BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64));
+ memcpy(data, &hw->stats, sizeof(hw->stats));
spin_unlock(&alx->stats_lock);
}
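
The tightened assertion now requires the whole hw->stats structure to be exactly ALX_NUM_STATS u64 counters, which is what makes the single memcpy() of the full struct safe. BUILD_BUG_ON() is the standard compile-time check from include/linux/build_bug.h; a small, purely illustrative example (struct example and example_check() are hypothetical):

    struct example { u64 a; u64 b; };

    static void example_check(void)
    {
            /* the build fails if the condition is true */
            BUILD_BUG_ON(sizeof(struct example) != 2 * sizeof(u64));
    }
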
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 948586bf1b5b..75ca3ddda1f5 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -255,4 +255,16 @@ config BNXT_HWMON
Say Y if you want to expose the thermal sensor data on NetXtreme-C/E
devices, via the hwmon sysfs interface.
+config BCMASP
+ tristate "Broadcom ASP 2.0 Ethernet support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ depends on OF
+ select MII
+ select PHYLIB
+ select MDIO_BCM_UNIMAC
+ help
+ This configuration enables the Broadcom ASP 2.0 Ethernet controller
+ driver which is present in Broadcom STB SoCs such as 72165.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 0ddfb5b5d53c..bac5cb6ad0cd 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
+obj-$(CONFIG_BCMASP) += asp2/
diff --git a/drivers/net/ethernet/broadcom/asp2/Makefile b/drivers/net/ethernet/broadcom/asp2/Makefile
new file mode 100644
index 000000000000..e07550315f83
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMASP) += bcm-asp.o
+bcm-asp-objs := bcmasp.o bcmasp_intf.o bcmasp_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
new file mode 100644
index 000000000000..d63d321f3e7b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB ASP 2.0 Driver
+ *
+ * Copyright (c) 2023 Broadcom
+ */
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask)
+{
+ intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR);
+ priv->irq_mask &= ~mask;
+}
+
+static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
+{
+ intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET);
+ priv->irq_mask |= mask;
+}
+
+void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq);
+
+void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq);
+
+static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv)
+{
+ _intr2_mask_set(priv, 0xffffffff);
+ priv->irq_mask = 0xffffffff;
+}
+
+static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv)
+{
+ intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR);
+}
+
+static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
+{
+ if (status & ASP_INTR2_RX_ECH(intf->channel)) {
+ if (likely(napi_schedule_prep(&intf->rx_napi))) {
+ bcmasp_enable_rx_irq(intf, 0);
+ __napi_schedule_irqoff(&intf->rx_napi);
+ }
+ }
+
+ if (status & ASP_INTR2_TX_DESC(intf->channel)) {
+ if (likely(napi_schedule_prep(&intf->tx_napi))) {
+ bcmasp_enable_tx_irq(intf, 0);
+ __napi_schedule_irqoff(&intf->tx_napi);
+ }
+ }
+}
+
+static irqreturn_t bcmasp_isr(int irq, void *data)
+{
+ struct bcmasp_priv *priv = data;
+ struct bcmasp_intf *intf;
+ u32 status;
+
+ status = intr2_core_rl(priv, ASP_INTR2_STATUS) &
+ ~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS);
+
+ intr2_core_wl(priv, status, ASP_INTR2_CLEAR);
+
+ if (unlikely(status == 0)) {
+ dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n");
+ return IRQ_NONE;
+ }
+
+ /* Handle interfaces */
+ list_for_each_entry(intf, &priv->intfs, list)
+ bcmasp_intr2_handling(intf, status);
+
+ return IRQ_HANDLED;
+}
+
+void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u32 mask;
+
+ switch (intf->port) {
+ case 0:
+ mask = ASP_CTRL_UMAC0_FLUSH_MASK;
+ break;
+ case 1:
+ mask = ASP_CTRL_UMAC1_FLUSH_MASK;
+ break;
+ case 2:
+ mask = ASP_CTRL_SPB_FLUSH_MASK;
+ break;
+ default:
+ /* Not valid port */
+ return;
+ }
+
+ rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
+}
+
+static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64),
+ ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) |
+ ASP_RX_FILTER_NET_OFFSET_L3_0(32) |
+ ASP_RX_FILTER_NET_OFFSET_L3_1(96) |
+ ASP_RX_FILTER_NET_OFFSET_L4(32),
+ ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ ASP_RX_FILTER_NET_CFG_EN |
+ ASP_RX_FILTER_NET_CFG_L2_EN |
+ ASP_RX_FILTER_NET_CFG_L3_EN |
+ ASP_RX_FILTER_NET_CFG_L4_EN |
+ ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
+ ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
+
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ ASP_RX_FILTER_NET_CFG_EN |
+ ASP_RX_FILTER_NET_CFG_L2_EN |
+ ASP_RX_FILTER_NET_CFG_L3_EN |
+ ASP_RX_FILTER_NET_CFG_L4_EN |
+ ASP_RX_FILTER_NET_CFG_L3_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_L4_FRM(2) |
+ ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
+ ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1));
+}
+
+#define MAX_WAKE_FILTER_SIZE 256
+enum asp_netfilt_reg_type {
+ ASP_NETFILT_MATCH = 0,
+ ASP_NETFILT_MASK,
+ ASP_NETFILT_MAX
+};
+
+static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 offset)
+{
+ u32 block_index, filter_sel;
+
+ if (offset < 32) {
+ block_index = ASP_RX_FILTER_NET_L2;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 64) {
+ block_index = ASP_RX_FILTER_NET_L2;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 96) {
+ block_index = ASP_RX_FILTER_NET_L3_0;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 128) {
+ block_index = ASP_RX_FILTER_NET_L3_0;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 160) {
+ block_index = ASP_RX_FILTER_NET_L3_1;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 192) {
+ block_index = ASP_RX_FILTER_NET_L3_1;
+ filter_sel = nfilt->hw_index + 1;
+ } else if (offset < 224) {
+ block_index = ASP_RX_FILTER_NET_L4;
+ filter_sel = nfilt->hw_index;
+ } else if (offset < 256) {
+ block_index = ASP_RX_FILTER_NET_L4;
+ filter_sel = nfilt->hw_index + 1;
+ } else {
+ return -EINVAL;
+ }
+
+ switch (reg_type) {
+ case ASP_NETFILT_MATCH:
+ return ASP_RX_FILTER_NET_PAT(filter_sel, block_index,
+ (offset % 32));
+ case ASP_NETFILT_MASK:
+ return ASP_RX_FILTER_NET_MASK(filter_sel, block_index,
+ (offset % 32));
+ default:
+ return -EINVAL;
+ }
+}
+
+static void bcmasp_netfilt_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 val, u32 offset)
+{
+ int reg_offset;
+
+ /* HW only accepts 4 byte aligned writes */
+ if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
+ return;
+
+ reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
+ offset);
+
+ rx_filter_core_wl(priv, val, reg_offset);
+}
+
+static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ enum asp_netfilt_reg_type reg_type,
+ u32 offset)
+{
+ int reg_offset;
+
+ /* HW only accepts 4 byte aligned writes */
+ if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE)
+ return 0;
+
+ reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type,
+ offset);
+
+ return rx_filter_core_rl(priv, reg_offset);
+}
+
+static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ u32 offset, void *match, void *mask,
+ size_t size)
+{
+ u32 shift, mask_val = 0, match_val = 0;
+ bool first_byte = true;
+
+ if ((offset + size) > MAX_WAKE_FILTER_SIZE)
+ return -EINVAL;
+
+ while (size--) {
+ /* The HW only accepts 4 byte aligned writes, so if we
+ * begin unaligned or if fewer than 4 bytes remain,
+ * we need to read then write to avoid losing the
+ * current register state
+ */
+ if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) {
+ match_val = bcmasp_netfilt_rd(priv, nfilt,
+ ASP_NETFILT_MATCH,
+ ALIGN_DOWN(offset, 4));
+ mask_val = bcmasp_netfilt_rd(priv, nfilt,
+ ASP_NETFILT_MASK,
+ ALIGN_DOWN(offset, 4));
+ }
+
+ shift = (3 - (offset % 4)) * 8;
+ match_val &= ~GENMASK(shift + 7, shift);
+ mask_val &= ~GENMASK(shift + 7, shift);
+ match_val |= (u32)(*((u8 *)match) << shift);
+ mask_val |= (u32)(*((u8 *)mask) << shift);
+
+ /* If last byte or last byte of word, write to reg */
+ if (!size || ((offset % 4) == 3)) {
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH,
+ match_val, ALIGN_DOWN(offset, 4));
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK,
+ mask_val, ALIGN_DOWN(offset, 4));
+ first_byte = true;
+ } else {
+ first_byte = false;
+ }
+
+ offset++;
+ match++;
+ mask++;
+ }
+
+ return 0;
+}
+
+static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ int i;
+
+ for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) {
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i);
+ bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i);
+ }
+}
+
+static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ struct ethtool_tcpip4_spec *match,
+ struct ethtool_tcpip4_spec *mask,
+ u32 offset)
+{
+ __be16 val_16, mask_16;
+
+ val_16 = htons(ETH_P_IP);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
+ &match->tos, &mask->tos,
+ sizeof(match->tos));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
+ &match->ip4src, &mask->ip4src,
+ sizeof(match->ip4src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
+ &match->ip4dst, &mask->ip4dst,
+ sizeof(match->ip4dst));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 20,
+ &match->psrc, &mask->psrc,
+ sizeof(match->psrc));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22,
+ &match->pdst, &mask->pdst,
+ sizeof(match->pdst));
+}
+
+static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt,
+ struct ethtool_tcpip6_spec *match,
+ struct ethtool_tcpip6_spec *mask,
+ u32 offset)
+{
+ __be16 val_16, mask_16;
+
+ val_16 = htons(ETH_P_IPV6);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ val_16 = htons(match->tclass << 4);
+ mask_16 = htons(mask->tclass << 4);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8,
+ &match->ip6src, &mask->ip6src,
+ sizeof(match->ip6src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24,
+ &match->ip6dst, &mask->ip6dst,
+ sizeof(match->ip6dst));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40,
+ &match->psrc, &mask->psrc,
+ sizeof(match->psrc));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42,
+ &match->pdst, &mask->pdst,
+ sizeof(match->pdst));
+}
+
+static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv,
+ struct bcmasp_net_filter *nfilt)
+{
+ struct ethtool_rx_flow_spec *fs = &nfilt->fs;
+ unsigned int offset = 0;
+ __be16 val_16, mask_16;
+ u8 val_8, mask_8;
+
+ /* Currently only supports wake filters */
+ if (!nfilt->wake_filter)
+ return -EINVAL;
+
+ bcmasp_netfilt_reset_hw(priv, nfilt);
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest,
+ &fs->m_ext.h_dest,
+ sizeof(fs->h_ext.h_dest));
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) {
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2),
+ &fs->h_ext.vlan_etype,
+ &fs->m_ext.vlan_etype,
+ sizeof(fs->h_ext.vlan_etype));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2),
+ &fs->h_ext.vlan_tci,
+ &fs->m_ext.vlan_tci,
+ sizeof(fs->h_ext.vlan_tci));
+ offset += VLAN_HLEN;
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, 0,
+ &fs->h_u.ether_spec.h_dest,
+ &fs->m_u.ether_spec.h_dest,
+ sizeof(fs->h_u.ether_spec.h_dest));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN,
+ &fs->h_u.ether_spec.h_source,
+ &fs->m_u.ether_spec.h_source,
+ sizeof(fs->h_u.ether_spec.h_source));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &fs->h_u.ether_spec.h_proto,
+ &fs->m_u.ether_spec.h_proto,
+ sizeof(fs->h_u.ether_spec.h_proto));
+
+ break;
+ case IP_USER_FLOW:
+ val_16 = htons(ETH_P_IP);
+ mask_16 = htons(0xFFFF);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset,
+ &val_16, &mask_16, sizeof(val_16));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1,
+ &fs->h_u.usr_ip4_spec.tos,
+ &fs->m_u.usr_ip4_spec.tos,
+ sizeof(fs->h_u.usr_ip4_spec.tos));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &fs->h_u.usr_ip4_spec.proto,
+ &fs->m_u.usr_ip4_spec.proto,
+ sizeof(fs->h_u.usr_ip4_spec.proto));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12,
+ &fs->h_u.usr_ip4_spec.ip4src,
+ &fs->m_u.usr_ip4_spec.ip4src,
+ sizeof(fs->h_u.usr_ip4_spec.ip4src));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16,
+ &fs->h_u.usr_ip4_spec.ip4dst,
+ &fs->m_u.usr_ip4_spec.ip4dst,
+ sizeof(fs->h_u.usr_ip4_spec.ip4dst));
+ if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
+ break;
+
+ /* Only supports 20 byte IPv4 header */
+ val_8 = 0x45;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset,
+ &val_8, &mask_8, sizeof(val_8));
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + 20 + offset,
+ &fs->h_u.usr_ip4_spec.l4_4_bytes,
+ &fs->m_u.usr_ip4_spec.l4_4_bytes,
+ sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes));
+ break;
+ case TCP_V4_FLOW:
+ val_8 = IPPROTO_TCP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case UDP_V4_FLOW:
+ val_8 = IPPROTO_UDP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec,
+ &fs->m_u.udp_ip4_spec, offset);
+
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case TCP_V6_FLOW:
+ val_8 = IPPROTO_TCP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ case UDP_V6_FLOW:
+ val_8 = IPPROTO_UDP;
+ mask_8 = 0xFF;
+ bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec,
+ &fs->m_u.udp_ip6_spec, offset);
+ bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6,
+ &val_8, &mask_8, sizeof(val_8));
+ break;
+ }
+
+ bcmasp_netfilt_hw_en_wake(priv, nfilt);
+
+ return 0;
+}
+
+void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ bool write = false;
+ int ret, i;
+
+ /* Write all filters to HW */
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ /* If the filter does not match the port, skip programming. */
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]);
+ if (!ret)
+ write = true;
+ }
+
+ /* Successfully programmed at least one wake filter
+ * so enable top level wake config
+ */
+ if (write)
+ rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
+ ASP_RX_FILTER_LNR_MD |
+ ASP_RX_FILTER_GEN_WK_EN |
+ ASP_RX_FILTER_NT_FLT_EN),
+ ASP_RX_FILTER_BLK_CTRL);
+}
+
+void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int j = 0, i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ rule_locs[j++] = priv->net_filters[i].fs.location;
+ }
+
+ *rule_cnt = j;
+}
+
+int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int cnt = 0, i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ /* Skip over a wake filter pair */
+ if (i > 0 && (i % 2) &&
+ priv->net_filters[i].wake_filter &&
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ struct ethtool_rx_flow_spec *cur;
+ size_t fs_size = 0;
+ int i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+
+ cur = &priv->net_filters[i].fs;
+
+ if (cur->flow_type != fs->flow_type ||
+ cur->ring_cookie != fs->ring_cookie)
+ continue;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ fs_size = sizeof(struct ethhdr);
+ break;
+ case IP_USER_FLOW:
+ fs_size = sizeof(struct ethtool_usrip4_spec);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
+ memcmp(&cur->m_u, &fs->m_u, fs_size))
+ continue;
+
+ if (cur->flow_type & FLOW_EXT) {
+ if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
+ cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
+ cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
+ cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
+ cur->h_ext.data[0] != fs->h_ext.data[0])
+ continue;
+ }
+ if (cur->flow_type & FLOW_MAC_EXT) {
+ if (memcmp(&cur->h_ext.h_dest,
+ &fs->h_ext.h_dest, ETH_ALEN) ||
+ memcmp(&cur->m_ext.h_dest,
+ &fs->m_ext.h_dest, ETH_ALEN))
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Return the matching network filter if one is found. Otherwise claim and
+ * return an open filter; if none are available, return an error pointer.
+ */
+struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
+ u32 loc, bool wake_filter,
+ bool init)
+{
+ struct bcmasp_net_filter *nfilter = NULL;
+ struct bcmasp_priv *priv = intf->parent;
+ int i, open_index = -1;
+
+ /* Check whether we exceed the filter table capacity */
+ if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+ return ERR_PTR(-EINVAL);
+
+ /* If the filter location is busy (already claimed) and we are initializing
+ * the filter (insertion), return a busy error code.
+ */
+ if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed)
+ return ERR_PTR(-EBUSY);
+
+ /* We need two filters for wake-up, so we cannot use an odd filter */
+ if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2))
+ return ERR_PTR(-EINVAL);
+
+ /* Initialize the loop index based on the desired location or from 0 */
+ i = loc == RX_CLS_LOC_ANY ? 0 : loc;
+
+ for ( ; i < NUM_NET_FILTERS; i++) {
+ /* Found matching network filter */
+ if (!init &&
+ priv->net_filters[i].claimed &&
+ priv->net_filters[i].hw_index == i &&
+ priv->net_filters[i].port == intf->port)
+ return &priv->net_filters[i];
+
+ /* If we don't need a new filter or new filter already found */
+ if (!init || open_index >= 0)
+ continue;
+
+ /* A wake filter consolidates two filters to cover more bytes.
+ * A wake filter slot is open if:
+ * 1. It is an even filter
+ * 2. Neither the current nor the next filter is claimed
+ */
+ if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed &&
+ !priv->net_filters[i + 1].claimed)
+ open_index = i;
+ else if (!priv->net_filters[i].claimed)
+ open_index = i;
+ }
+
+ if (open_index >= 0) {
+ nfilter = &priv->net_filters[open_index];
+ nfilter->claimed = true;
+ nfilter->port = intf->port;
+ nfilter->hw_index = open_index;
+ }
+
+ if (wake_filter && open_index >= 0) {
+ /* Claim next filter */
+ priv->net_filters[open_index + 1].claimed = true;
+ priv->net_filters[open_index + 1].wake_filter = true;
+ nfilter->wake_filter = true;
+ }
+
+ return nfilter ? nfilter : ERR_PTR(-EINVAL);
+}
+
+void bcmasp_netfilt_release(struct bcmasp_intf *intf,
+ struct bcmasp_net_filter *nfilt)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (nfilt->wake_filter) {
+ memset(&priv->net_filters[nfilt->hw_index + 1], 0,
+ sizeof(struct bcmasp_net_filter));
+ }
+
+ memset(nfilt, 0, sizeof(struct bcmasp_net_filter));
+}
+
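+/* bcmasp_addr_to_uint() below splits a MAC address into the 16-bit high and
+ * 32-bit low halves used by the MDA pattern/mask registers. Illustrative
+ * example: 00:11:22:33:44:55 becomes high == 0x0011, low == 0x22334455.
+ */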
+static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low)
+{
+ *high = (u32)(addr[0] << 8 | addr[1]);
+ *low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 |
+ addr[5]);
+}
+
+static void bcmasp_set_mda_filter(struct bcmasp_intf *intf,
+ const unsigned char *addr,
+ unsigned char *mask,
+ unsigned int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u32 addr_h, addr_l, mask_h, mask_l;
+
+ /* Set local copy */
+ ether_addr_copy(priv->mda_filters[i].mask, mask);
+ ether_addr_copy(priv->mda_filters[i].addr, addr);
+
+ /* Write to HW */
+ bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l);
+ bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l);
+ rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i));
+ rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i));
+ rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i));
+ rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i));
+}
+
+static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
+ unsigned int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (priv->mda_filters[i].en == en)
+ return;
+
+ priv->mda_filters[i].en = en;
+ priv->mda_filters[i].port = intf->port;
+
+ rx_filter_core_wl(priv, ((intf->channel + 8) |
+ (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
+ ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
+ ASP_RX_FILTER_MDA_CFG(i));
+}
+
+/* There are 32 MDA filters shared between all ports, we reserve 4 filters per
+ * port for the following.
+ * - Promisc: Filter to allow all packets when promisc is enabled
+ * - All Multicast
+ * - Broadcast
+ * - Own address
+ *
+ * The reserved filters are identified as follows:
+ * - Promisc: (index * 4) + 0
+ * - All Multicast: (index * 4) + 1
+ * - Broadcast: (index * 4) + 2
+ * - Own address: (index * 4) + 3
+ */
+enum asp_rx_filter_id {
+ ASP_RX_FILTER_MDA_PROMISC = 0,
+ ASP_RX_FILTER_MDA_ALLMULTI,
+ ASP_RX_FILTER_MDA_BROADCAST,
+ ASP_RX_FILTER_MDA_OWN_ADDR,
+ ASP_RX_FILTER_MDA_RES_MAX,
+};
+
+#define ASP_RX_FILT_MDA(intf, name) (((intf)->index * \
+ ASP_RX_FILTER_MDA_RES_MAX) \
+ + ASP_RX_FILTER_MDA_##name)
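+
+/* Illustrative example of the reservation scheme above: the interface with
+ * index 1 owns reserved MDA filters 4..7 (promisc 4, all-multicast 5,
+ * broadcast 6, own address 7).
+ */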
+
+static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv)
+{
+ return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX;
+}
+
+void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en)
+{
+ unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC);
+ unsigned char promisc[ETH_ALEN];
+
+ eth_zero_addr(promisc);
+ /* Set mask to 00:00:00:00:00:00 to match all packets */
+ bcmasp_set_mda_filter(intf, promisc, promisc, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en)
+{
+ unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI);
+
+ /* Set mask to 01:00:00:00:00:00 to match all multicast */
+ bcmasp_set_mda_filter(intf, allmulti, allmulti, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_broad(struct bcmasp_intf *intf, bool en)
+{
+ unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST);
+ unsigned char addr[ETH_ALEN];
+
+ eth_broadcast_addr(addr);
+ bcmasp_set_mda_filter(intf, addr, addr, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
+ bool en)
+{
+ unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR);
+
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ bcmasp_en_mda_filter(intf, en, i);
+}
+
+void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ unsigned int i;
+ int res_count;
+
+ res_count = bcmasp_total_res_mda_cnt(intf->parent);
+
+ /* Disable all filters held by this port */
+ for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ if (priv->mda_filters[i].en &&
+ priv->mda_filters[i].port == intf->port)
+ bcmasp_en_mda_filter(intf, 0, i);
+ }
+}
+
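+/* Illustrative example for the combine logic below (hypothetical addresses):
+ * an existing filter 01:00:5e:00:00:00 with mask ff:ff:ff:00:00:00 already
+ * covers a new filter 01:00:5e:01:02:03 with mask ff:ff:ff:ff:ff:ff, so the
+ * existing filter is kept unchanged.
+ */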
+static int bcmasp_combine_set_filter(struct bcmasp_intf *intf,
+ unsigned char *addr, unsigned char *mask,
+ int i)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ u64 addr1, addr2, mask1, mask2, mask3;
+
+ /* Switch to u64 to help with the calculations */
+ addr1 = ether_addr_to_u64(priv->mda_filters[i].addr);
+ mask1 = ether_addr_to_u64(priv->mda_filters[i].mask);
+ addr2 = ether_addr_to_u64(addr);
+ mask2 = ether_addr_to_u64(mask);
+
+ /* Check if one filter resides within the other */
+ mask3 = mask1 & mask2;
+ if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) {
+ /* Filter 2 resides within filter 1, so everything is good */
+ return 0;
+ } else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) {
+ /* Filter 1 resides within filter 2, so swap filters */
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ return 0;
+ }
+
+ /* Unable to combine */
+ return -EINVAL;
+}
+
+int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
+ unsigned char *mask)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ int ret, res_count;
+ unsigned int i;
+
+ res_count = bcmasp_total_res_mda_cnt(intf->parent);
+
+ for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ /* If filter not enabled or belongs to another port skip */
+ if (!priv->mda_filters[i].en ||
+ priv->mda_filters[i].port != intf->port)
+ continue;
+
+ /* Attempt to combine filters */
+ ret = bcmasp_combine_set_filter(intf, addr, mask, i);
+ if (!ret) {
+ intf->mib.filters_combine_cnt++;
+ return 0;
+ }
+ }
+
+ /* Create new filter if possible */
+ for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ if (priv->mda_filters[i].en)
+ continue;
+
+ bcmasp_set_mda_filter(intf, addr, mask, i);
+ bcmasp_en_mda_filter(intf, 1, i);
+ return 0;
+ }
+
+ /* No room for new filter */
+ return -EINVAL;
+}
+
+static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
+{
+ unsigned int i;
+
+ /* Disable all filters and reset software view since the HW
+ * can lose context while in deep sleep suspend states
+ */
+ for (i = 0; i < NUM_MDA_FILTERS; i++) {
+ rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
+ priv->mda_filters[i].en = 0;
+ }
+
+ for (i = 0; i < NUM_NET_FILTERS; i++)
+ rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
+
+ /* Top level filter enable bit should be enabled at all times, set
+ * GEN_WAKE_CLEAR to clear the network filter wake-up which would
+ * otherwise be sticky
+ */
+ rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN |
+ ASP_RX_FILTER_MDA_EN |
+ ASP_RX_FILTER_GEN_WK_CLR |
+ ASP_RX_FILTER_NT_FLT_EN),
+ ASP_RX_FILTER_BLK_CTRL);
+}
+
+/* ASP core initialization */
+static void bcmasp_core_init(struct bcmasp_priv *priv)
+{
+ tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
+ rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);
+
+ rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
+ ASP_EDPKT_HDR_CFG);
+ rx_edpkt_core_wl(priv,
+ (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
+ ASP_EDPKT_ENDI);
+
+ rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
+ rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
+ rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);
+
+ rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
+
+ /* Disable and clear both UniMAC's wake-up interrupts to avoid
+ * sticky interrupts.
+ */
+ _intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE);
+ intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE,
+ ASP_INTR2_CLEAR);
+}
+
+static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN;
+ ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
+{
+ u32 reg;
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL);
+ reg &= ~clr;
+ reg |= set;
+ ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL);
+
+ reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0);
+ reg &= ~clr;
+ reg |= set;
+ ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0);
+}
+
+static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->clk_lock, flags);
+ bcmasp_core_clock_set_ll(priv, clr, set);
+ spin_unlock_irqrestore(&priv->clk_lock, flags);
+}
+
+void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en)
+{
+ u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port);
+ struct bcmasp_priv *priv = intf->parent;
+ unsigned long flags;
+ u32 reg;
+
+ /* When enabling an interface, if the RX or TX clocks were not enabled,
+ * enable them. Conversely, while disabling an interface, if this is
+ * the last one enabled, we can turn off the shared RX and TX clocks as
+ * well. The register holds per-port disable bits, which is why we test
+ * for equality against the full RGMII clock bit mask (all ports
+ * disabled).
+ */
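+ /* Illustrative example: once every port has its RGMII disable bit set,
+ * the equality test in the disable path below matches and the shared
+ * TX/RX clocks are turned off along with the last port.
+ */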
+ spin_lock_irqsave(&priv->clk_lock, flags);
+ if (en) {
+ intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
+ ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
+ bcmasp_core_clock_set_ll(priv, intf_mask, 0);
+ } else {
+ reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask;
+ if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) ==
+ ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK)
+ intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE |
+ ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE;
+ bcmasp_core_clock_set_ll(priv, 0, intf_mask);
+ }
+ spin_unlock_irqrestore(&priv->clk_lock, flags);
+}
+
+static irqreturn_t bcmasp_isr_wol(int irq, void *data)
+{
+ struct bcmasp_priv *priv = data;
+ u32 status;
+
+ /* No L3 IRQ, nothing to ack; just report the wake-up event */
+ if (priv->wol_irq <= 0)
+ goto irq_handled;
+
+ status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) &
+ ~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS);
+ wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR);
+
+irq_handled:
+ pm_wakeup_event(&priv->pdev->dev, 0);
+ return IRQ_HANDLED;
+}
+
+static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
+{
+ struct platform_device *pdev = priv->pdev;
+ int irq, ret;
+
+ irq = platform_get_irq_optional(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0,
+ pdev->name, priv);
+ if (ret)
+ return ret;
+
+ return irq;
+}
+
+static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int irq;
+
+ irq = bcmasp_get_and_request_irq(priv, 1);
+ if (irq < 0) {
+ dev_warn(dev, "Failed to init WoL irq: %d\n", irq);
+ return;
+ }
+
+ priv->wol_irq = irq;
+ priv->wol_irq_enabled_mask = 0;
+ device_set_wakeup_capable(&pdev->dev, 1);
+}
+
+static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *dev = &priv->pdev->dev;
+
+ if (en) {
+ if (priv->wol_irq_enabled_mask) {
+ set_bit(intf->port, &priv->wol_irq_enabled_mask);
+ return;
+ }
+
+ /* First enable */
+ set_bit(intf->port, &priv->wol_irq_enabled_mask);
+ enable_irq_wake(priv->wol_irq);
+ device_set_wakeup_enable(dev, 1);
+ } else {
+ if (!priv->wol_irq_enabled_mask)
+ return;
+
+ clear_bit(intf->port, &priv->wol_irq_enabled_mask);
+ if (priv->wol_irq_enabled_mask)
+ return;
+
+ /* Last disable */
+ disable_irq_wake(priv->wol_irq);
+ device_set_wakeup_enable(dev, 0);
+ }
+}
+
+static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
+{
+ if (priv->wol_irq > 0)
+ free_irq(priv->wol_irq, priv);
+}
+
+static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct bcmasp_intf *intf;
+ int irq;
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
+ if (irq < 0) {
+ dev_warn(dev, "Failed to init WoL irq (port %d): %d\n",
+ intf->port, irq);
+ continue;
+ }
+
+ intf->wol_irq = irq;
+ intf->wol_irq_enabled = false;
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
+}
+
+static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
+{
+ struct device *dev = &intf->parent->pdev->dev;
+
+ if (en ^ intf->wol_irq_enabled)
+ irq_set_irq_wake(intf->wol_irq, en);
+
+ intf->wol_irq_enabled = en;
+ device_set_wakeup_enable(dev, en);
+}
+
+static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
+{
+ struct bcmasp_intf *intf;
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ if (intf->wol_irq > 0)
+ free_irq(intf->wol_irq, priv);
+ }
+}
+
+static struct bcmasp_hw_info v20_hw_info = {
+ .rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
+ .umac2fb = UMAC2FB_OFFSET,
+ .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
+ .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
+ .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
+};
+
+static const struct bcmasp_plat_data v20_plat_data = {
+ .init_wol = bcmasp_init_wol_per_intf,
+ .enable_wol = bcmasp_enable_wol_per_intf,
+ .destroy_wol = bcmasp_wol_irq_destroy_per_intf,
+ .hw_info = &v20_hw_info,
+};
+
+static struct bcmasp_hw_info v21_hw_info = {
+ .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
+ .umac2fb = UMAC2FB_OFFSET_2_1,
+ .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
+ .rx_ctrl_fb_filt_out_frame_count =
+ ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
+ .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
+};
+
+static const struct bcmasp_plat_data v21_plat_data = {
+ .init_wol = bcmasp_init_wol_shared,
+ .enable_wol = bcmasp_enable_wol_shared,
+ .destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .hw_info = &v21_hw_info,
+};
+
+static const struct of_device_id bcmasp_of_match[] = {
+ { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
+ { .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcmasp_of_match);
+
+static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.1-mdio", },
+ { .compatible = "brcm,asp-v2.0-mdio", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
+
+static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
+{
+ struct bcmasp_intf *intf, *n;
+
+ list_for_each_entry_safe(intf, n, &priv->intfs, list) {
+ list_del(&intf->list);
+ bcmasp_interface_destroy(intf);
+ }
+}
+
+static int bcmasp_probe(struct platform_device *pdev)
+{
+ struct device_node *ports_node, *intf_node;
+ const struct bcmasp_plat_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct bcmasp_priv *priv;
+ struct bcmasp_intf *intf;
+ int ret = 0, count = 0;
+ unsigned int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0)
+ return -EINVAL;
+
+ priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "failed to request clock\n");
+
+ /* Base from parent node */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n");
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret);
+
+ dev_set_drvdata(&pdev->dev, priv);
+ priv->pdev = pdev;
+ spin_lock_init(&priv->mda_lock);
+ spin_lock_init(&priv->clk_lock);
+ mutex_init(&priv->wol_lock);
+ mutex_init(&priv->net_lock);
+ INIT_LIST_HEAD(&priv->intfs);
+
+ pdata = device_get_match_data(&pdev->dev);
+ if (!pdata)
+ return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
+
+ priv->init_wol = pdata->init_wol;
+ priv->enable_wol = pdata->enable_wol;
+ priv->destroy_wol = pdata->destroy_wol;
+ priv->hw_info = pdata->hw_info;
+
+ /* Enable all clocks to ensure successful probing */
+ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
+
+ /* Switch to the main clock */
+ bcmasp_core_clock_select(priv, false);
+
+ bcmasp_intr2_mask_set_all(priv);
+ bcmasp_intr2_clear_all(priv);
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
+ pdev->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
+
+ /* Register mdio child nodes */
+ of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
+
+ /* ASP specific initialization; needs to be done regardless of
+ * how many interfaces come up.
+ */
+ bcmasp_core_init(priv);
+ bcmasp_core_init_filters(priv);
+
+ ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
+ if (!ports_node) {
+ dev_warn(dev, "No ports found\n");
+ return -EINVAL;
+ }
+
+ i = 0;
+ for_each_available_child_of_node(ports_node, intf_node) {
+ intf = bcmasp_interface_create(priv, intf_node, i);
+ if (!intf) {
+ dev_err(dev, "Cannot create eth interface %d\n", i);
+ bcmasp_remove_intfs(priv);
+ goto of_put_exit;
+ }
+ list_add_tail(&intf->list, &priv->intfs);
+ i++;
+ }
+
+ /* Check and enable WoL */
+ priv->init_wol(priv);
+
+ /* Drop the clock reference count now and let ndo_open()/ndo_close()
+ * manage it for us from now on.
+ */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+ clk_disable_unprepare(priv->clk);
+
+ /* Now do the registration of the network ports which will take care
+ * of managing the clock properly.
+ */
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = register_netdev(intf->ndev);
+ if (ret) {
+ netdev_err(intf->ndev,
+ "failed to register net_device: %d\n", ret);
+ priv->destroy_wol(priv);
+ bcmasp_remove_intfs(priv);
+ goto of_put_exit;
+ }
+ count++;
+ }
+
+ dev_info(dev, "Initialized %d port(s)\n", count);
+
+of_put_exit:
+ of_node_put(ports_node);
+ return ret;
+}
+
+static int bcmasp_remove(struct platform_device *pdev)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!priv)
+ return 0;
+
+ priv->destroy_wol(priv);
+ bcmasp_remove_intfs(priv);
+
+ return 0;
+}
+
+static void bcmasp_shutdown(struct platform_device *pdev)
+{
+ bcmasp_remove(pdev);
+}
+
+static int __maybe_unused bcmasp_suspend(struct device *d)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(d);
+ struct bcmasp_intf *intf;
+ int ret;
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = bcmasp_interface_suspend(intf);
+ if (ret)
+ break;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Whether Wake-on-LAN is enabled or not, we can always disable
+ * the shared TX clock
+ */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
+
+ bcmasp_core_clock_select(priv, true);
+
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int __maybe_unused bcmasp_resume(struct device *d)
+{
+ struct bcmasp_priv *priv = dev_get_drvdata(d);
+ struct bcmasp_intf *intf;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Switch to the main clock domain */
+ bcmasp_core_clock_select(priv, false);
+
+ /* Re-enable all clocks for re-initialization */
+ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
+
+ bcmasp_core_init(priv);
+ bcmasp_core_init_filters(priv);
+
+ /* And disable them to let the network devices take care of them */
+ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
+
+ clk_disable_unprepare(priv->clk);
+
+ list_for_each_entry(intf, &priv->intfs, list) {
+ ret = bcmasp_interface_resume(intf);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
+ bcmasp_suspend, bcmasp_resume);
+
+static struct platform_driver bcmasp_driver = {
+ .probe = bcmasp_probe,
+ .remove = bcmasp_remove,
+ .shutdown = bcmasp_shutdown,
+ .driver = {
+ .name = "brcm,asp-v2",
+ .of_match_table = bcmasp_of_match,
+ .pm = &bcmasp_pm_ops,
+ },
+};
+module_platform_driver(bcmasp_driver);
+
+MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver");
+MODULE_ALIAS("platform:brcm,asp-v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
new file mode 100644
index 000000000000..5b512f7f5e94
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCMASP_H
+#define __BCMASP_H
+
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <uapi/linux/ethtool.h>
+
+#define ASP_INTR2_OFFSET 0x1000
+#define ASP_INTR2_STATUS 0x0
+#define ASP_INTR2_SET 0x4
+#define ASP_INTR2_CLEAR 0x8
+#define ASP_INTR2_MASK_STATUS 0xc
+#define ASP_INTR2_MASK_SET 0x10
+#define ASP_INTR2_MASK_CLEAR 0x14
+
+#define ASP_INTR2_RX_ECH(intr) BIT(intr)
+#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
+#define ASP_INTR2_UMC0_WAKE BIT(22)
+#define ASP_INTR2_UMC1_WAKE BIT(28)
+
+#define ASP_WAKEUP_INTR2_OFFSET 0x1200
+#define ASP_WAKEUP_INTR2_STATUS 0x0
+#define ASP_WAKEUP_INTR2_SET 0x4
+#define ASP_WAKEUP_INTR2_CLEAR 0x8
+#define ASP_WAKEUP_INTR2_MASK_STATUS 0xc
+#define ASP_WAKEUP_INTR2_MASK_SET 0x10
+#define ASP_WAKEUP_INTR2_MASK_CLEAR 0x14
+#define ASP_WAKEUP_INTR2_MPD_0 BIT(0)
+#define ASP_WAKEUP_INTR2_MPD_1 BIT(1)
+#define ASP_WAKEUP_INTR2_FILT_0 BIT(2)
+#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
+#define ASP_WAKEUP_INTR2_FW BIT(4)
+
+#define ASP_TX_ANALYTICS_OFFSET 0x4c000
+#define ASP_TX_ANALYTICS_CTRL 0x0
+
+#define ASP_RX_ANALYTICS_OFFSET 0x98000
+#define ASP_RX_ANALYTICS_CTRL 0x0
+
+#define ASP_RX_CTRL_OFFSET 0x9f000
+#define ASP_RX_CTRL_UMAC_0_FRAME_COUNT 0x8
+#define ASP_RX_CTRL_UMAC_1_FRAME_COUNT 0xc
+#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
+#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
+#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
+/* ASP 2.1 register offsets diverge here */
+/* ASP2.0 */
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FLUSH 0x28
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30
+/* ASP2.1 */
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c
+#define ASP_RX_CTRL_FLUSH_2_1 0x30
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38
+
+#define ASP_RX_FILTER_OFFSET 0x80000
+#define ASP_RX_FILTER_BLK_CTRL 0x0
+#define ASP_RX_FILTER_OPUT_EN BIT(0)
+#define ASP_RX_FILTER_MDA_EN BIT(1)
+#define ASP_RX_FILTER_LNR_MD BIT(2)
+#define ASP_RX_FILTER_GEN_WK_EN BIT(3)
+#define ASP_RX_FILTER_GEN_WK_CLR BIT(4)
+#define ASP_RX_FILTER_NT_FLT_EN BIT(5)
+#define ASP_RX_FILTER_MDA_CFG(sel) (((sel) * 0x14) + 0x100)
+#define ASP_RX_FILTER_MDA_CFG_EN_SHIFT 8
+#define ASP_RX_FILTER_MDA_CFG_UMC_SEL(sel) ((sel) > 1 ? BIT(17) : \
+ BIT((sel) + 9))
+#define ASP_RX_FILTER_MDA_PAT_H(sel) (((sel) * 0x14) + 0x104)
+#define ASP_RX_FILTER_MDA_PAT_L(sel) (((sel) * 0x14) + 0x108)
+#define ASP_RX_FILTER_MDA_MSK_H(sel) (((sel) * 0x14) + 0x10c)
+#define ASP_RX_FILTER_MDA_MSK_L(sel) (((sel) * 0x14) + 0x110)
+#define ASP_RX_FILTER_NET_CFG(sel) (((sel) * 0xa04) + 0x400)
+#define ASP_RX_FILTER_NET_CFG_CH(sel) ((sel) << 0)
+#define ASP_RX_FILTER_NET_CFG_EN BIT(9)
+#define ASP_RX_FILTER_NET_CFG_L2_EN BIT(10)
+#define ASP_RX_FILTER_NET_CFG_L3_EN BIT(11)
+#define ASP_RX_FILTER_NET_CFG_L4_EN BIT(12)
+#define ASP_RX_FILTER_NET_CFG_L3_FRM(sel) ((sel) << 13)
+#define ASP_RX_FILTER_NET_CFG_L4_FRM(sel) ((sel) << 15)
+#define ASP_RX_FILTER_NET_CFG_UMC(sel) BIT((sel) + 19)
+#define ASP_RX_FILTER_NET_CFG_DMA_EN BIT(27)
+
+#define ASP_RX_FILTER_NET_OFFSET_MAX 32
+#define ASP_RX_FILTER_NET_PAT(sel, block, off) \
+ (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x600)
+#define ASP_RX_FILTER_NET_MASK(sel, block, off) \
+ (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x700)
+
+#define ASP_RX_FILTER_NET_OFFSET(sel) (((sel) * 0xa04) + 0xe00)
+#define ASP_RX_FILTER_NET_OFFSET_L2(val) ((val) << 0)
+#define ASP_RX_FILTER_NET_OFFSET_L3_0(val) ((val) << 8)
+#define ASP_RX_FILTER_NET_OFFSET_L3_1(val) ((val) << 16)
+#define ASP_RX_FILTER_NET_OFFSET_L4(val) ((val) << 24)
+
+enum asp_rx_net_filter_block {
+ ASP_RX_FILTER_NET_L2 = 0,
+ ASP_RX_FILTER_NET_L3_0,
+ ASP_RX_FILTER_NET_L3_1,
+ ASP_RX_FILTER_NET_L4,
+ ASP_RX_FILTER_NET_BLOCK_MAX
+};
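+
+/* Illustrative example: ASP_RX_FILTER_NET_PAT(0, ASP_RX_FILTER_NET_L3_1, 12)
+ * above yields the register word that holds bytes 12..15 of filter 0's
+ * 32-byte L3_1 pattern window.
+ */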
+
+#define ASP_EDPKT_OFFSET 0x9c000
+#define ASP_EDPKT_ENABLE 0x4
+#define ASP_EDPKT_ENABLE_EN BIT(0)
+#define ASP_EDPKT_HDR_CFG 0xc
+#define ASP_EDPKT_HDR_SZ_SHIFT 2
+#define ASP_EDPKT_HDR_SZ_32 0
+#define ASP_EDPKT_HDR_SZ_64 1
+#define ASP_EDPKT_HDR_SZ_96 2
+#define ASP_EDPKT_HDR_SZ_128 3
+#define ASP_EDPKT_BURST_BUF_PSCAL_TOUT 0x10
+#define ASP_EDPKT_BURST_BUF_WRITE_TOUT 0x14
+#define ASP_EDPKT_BURST_BUF_READ_TOUT 0x18
+#define ASP_EDPKT_RX_TS_COUNTER 0x38
+#define ASP_EDPKT_ENDI 0x48
+#define ASP_EDPKT_ENDI_DESC_SHIFT 8
+#define ASP_EDPKT_ENDI_NO_BT_SWP 0
+#define ASP_EDPKT_ENDI_BT_SWP_WD 1
+#define ASP_EDPKT_RX_PKT_CNT 0x138
+#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
+#define ASP_EDPKT_HDR_OUT_CNT 0x140
+
+#define ASP_CTRL 0x101000
+#define ASP_CTRL_ASP_SW_INIT 0x04
+#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
+#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
+#define ASP_CTRL_ASP_SW_INIT_AS_RX BIT(2)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC0 BIT(3)
+#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC1 BIT(4)
+#define ASP_CTRL_ASP_SW_INIT_ASP_XMEMIF BIT(5)
+#define ASP_CTRL_CLOCK_CTRL 0x04
+#define ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE BIT(0)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE BIT(1)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT 2
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK (0x7 << ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT)
+#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(x) BIT(ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT + (x))
+#define ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE GENMASK(4, 0)
+#define ASP_CTRL_CORE_CLOCK_SELECT 0x08
+#define ASP_CTRL_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL_SCRATCH_0 0x0c
+
+struct bcmasp_tx_cb {
+ struct sk_buff *skb;
+ unsigned int bytes_sent;
+ bool last;
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+struct bcmasp_res {
+ /* Per interface resources */
+ /* Port */
+ void __iomem *umac;
+ void __iomem *umac2fb;
+ void __iomem *rgmii;
+
+ /* TX slowpath/configuration */
+ void __iomem *tx_spb_ctrl;
+ void __iomem *tx_spb_top;
+ void __iomem *tx_epkt_core;
+ void __iomem *tx_pause_ctrl;
+};
+
+#define DESC_ADDR(x) ((x) & GENMASK_ULL(39, 0))
+#define DESC_FLAGS(x) ((x) & GENMASK_ULL(63, 40))
+
+struct bcmasp_desc {
+ u64 buf;
+ #define DESC_CHKSUM BIT_ULL(40)
+ #define DESC_CRC_ERR BIT_ULL(41)
+ #define DESC_RX_SYM_ERR BIT_ULL(42)
+ #define DESC_NO_OCT_ALN BIT_ULL(43)
+ #define DESC_PKT_TRUC BIT_ULL(44)
+ /* 39:0 (TX/RX) bits 0-39 of buf addr
+ * 40 (RX) checksum
+ * 41 (RX) crc_error
+ * 42 (RX) rx_symbol_error
+ * 43 (RX) non_octet_aligned
+ * 44 (RX) pkt_truncated
+ * 45 Reserved
+ * 56:46 (RX) mac_filter_id
+ * 60:57 (RX) rx_port_num (0-unimac0, 1-unimac1)
+ * 61 Reserved
+ * 63:62 (TX) forward CRC, overwrite CRC
+ */
+ u32 size;
+ u32 flags;
+ #define DESC_INT_EN BIT(0)
+ #define DESC_SOF BIT(1)
+ #define DESC_EOF BIT(2)
+ #define DESC_EPKT_CMD BIT(3)
+ #define DESC_SCRAM_ST BIT(8)
+ #define DESC_SCRAM_END BIT(9)
+ #define DESC_PCPP BIT(10)
+ #define DESC_PPPP BIT(11)
+ /* 0 (TX) tx_int_en
+ * 1 (TX/RX) SOF
+ * 2 (TX/RX) EOF
+ * 3 (TX) epkt_command
+ * 6:4 (TX) PA
+ * 7 (TX) pause at desc end
+ * 8 (TX) scram_start
+ * 9 (TX) scram_end
+ * 10 (TX) PCPP
+ * 11 (TX) PPPP
+ * 14:12 Reserved
+ * 15 (TX) pid ch Valid
+ * 19:16 (TX) data_pkt_type
+ * 32:20 (TX) pid_channel (RX) nw_filter_id
+ */
+};
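+
+/* Illustrative use of the DESC_ADDR()/DESC_FLAGS() helpers above: a 40-bit
+ * DMA address with DESC_CRC_ERR set is stored as buf = addr | DESC_CRC_ERR;
+ * DESC_ADDR(buf) recovers the address and DESC_FLAGS(buf) the status bits.
+ */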
+
+struct bcmasp_intf;
+
+struct bcmasp_intf_stats64 {
+ /* Rx Stats */
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t rx_crc_errs;
+ u64_stats_t rx_sym_errs;
+
+ /* Tx Stats */
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+
+ struct u64_stats_sync syncp;
+};
+
+struct bcmasp_mib_counters {
+ u32 edpkt_ts;
+ u32 edpkt_rx_pkt_cnt;
+ u32 edpkt_hdr_ext_cnt;
+ u32 edpkt_hdr_out_cnt;
+ u32 umac_frm_cnt;
+ u32 fb_frm_cnt;
+ u32 fb_rx_fifo_depth;
+ u32 fb_out_frm_cnt;
+ u32 fb_filt_out_frm_cnt;
+ u32 alloc_rx_skb_failed;
+ u32 tx_dma_failed;
+ u32 mc_filters_full_cnt;
+ u32 uc_filters_full_cnt;
+ u32 filters_combine_cnt;
+ u32 promisc_filters_cnt;
+ u32 tx_realloc_offload_failed;
+ u32 tx_timeout_cnt;
+};
+
+struct bcmasp_intf_ops {
+ unsigned long (*rx_desc_read)(struct bcmasp_intf *intf);
+ void (*rx_buffer_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+ void (*rx_desc_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+ unsigned long (*tx_read)(struct bcmasp_intf *intf);
+ void (*tx_write)(struct bcmasp_intf *intf, dma_addr_t addr);
+};
+
+struct bcmasp_priv;
+
+struct bcmasp_intf {
+ struct list_head list;
+ struct net_device *ndev;
+ struct bcmasp_priv *parent;
+
+ /* ASP Ch */
+ int channel;
+ int port;
+ const struct bcmasp_intf_ops *ops;
+
+ /* Used for splitting shared resources */
+ int index;
+
+ struct napi_struct tx_napi;
+ /* TX ring, starts on a new cacheline boundary */
+ void __iomem *tx_spb_dma;
+ int tx_spb_index;
+ int tx_spb_clean_index;
+ struct bcmasp_desc *tx_spb_cpu;
+ dma_addr_t tx_spb_dma_addr;
+ dma_addr_t tx_spb_dma_valid;
+ dma_addr_t tx_spb_dma_read;
+ struct bcmasp_tx_cb *tx_cbs;
+
+ /* RX ring, starts on a new cacheline boundary */
+ void __iomem *rx_edpkt_cfg;
+ void __iomem *rx_edpkt_dma;
+ int rx_edpkt_index;
+ int rx_buf_order;
+ struct bcmasp_desc *rx_edpkt_cpu;
+ dma_addr_t rx_edpkt_dma_addr;
+ dma_addr_t rx_edpkt_dma_read;
+
+ /* RX buffer prefetcher ring */
+ void *rx_ring_cpu;
+ dma_addr_t rx_ring_dma;
+ dma_addr_t rx_ring_dma_valid;
+ struct napi_struct rx_napi;
+
+ struct bcmasp_res res;
+ unsigned int crc_fwd;
+
+ /* PHY device */
+ struct device_node *phy_dn;
+ struct device_node *ndev_dn;
+ phy_interface_t phy_interface;
+ bool internal_phy;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ u32 msg_enable;
+
+ /* Statistics */
+ struct bcmasp_intf_stats64 stats64;
+ struct bcmasp_mib_counters mib;
+
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX];
+ /* Used if per intf wol irq */
+ int wol_irq;
+ unsigned int wol_irq_enabled:1;
+
+ struct ethtool_eee eee;
+};
+
+#define NUM_NET_FILTERS 32
+struct bcmasp_net_filter {
+ struct ethtool_rx_flow_spec fs;
+
+ bool claimed;
+ bool wake_filter;
+
+ int port;
+ unsigned int hw_index;
+};
+
+#define NUM_MDA_FILTERS 32
+struct bcmasp_mda_filter {
+ /* Current owner of this filter */
+ int port;
+ bool en;
+ u8 addr[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+};
+
+struct bcmasp_hw_info {
+ u32 rx_ctrl_flush;
+ u32 umac2fb;
+ u32 rx_ctrl_fb_out_frame_count;
+ u32 rx_ctrl_fb_filt_out_frame_count;
+ u32 rx_ctrl_fb_rx_fifo_depth;
+};
+
+struct bcmasp_plat_data {
+ void (*init_wol)(struct bcmasp_priv *priv);
+ void (*enable_wol)(struct bcmasp_intf *intf, bool en);
+ void (*destroy_wol)(struct bcmasp_priv *priv);
+ struct bcmasp_hw_info *hw_info;
+};
+
+struct bcmasp_priv {
+ struct platform_device *pdev;
+ struct clk *clk;
+
+ int irq;
+ u32 irq_mask;
+
+ /* Used if shared wol irq */
+ struct mutex wol_lock;
+ int wol_irq;
+ unsigned long wol_irq_enabled_mask;
+
+ void (*init_wol)(struct bcmasp_priv *priv);
+ void (*enable_wol)(struct bcmasp_intf *intf, bool en);
+ void (*destroy_wol)(struct bcmasp_priv *priv);
+
+ void __iomem *base;
+ struct bcmasp_hw_info *hw_info;
+
+ struct list_head intfs;
+
+ struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS];
+
+ /* MAC destination address filters lock */
+ spinlock_t mda_lock;
+
+ /* Protects accesses to ASP_CTRL_CLOCK_CTRL */
+ spinlock_t clk_lock;
+
+ struct bcmasp_net_filter net_filters[NUM_NET_FILTERS];
+
+ /* Network filter lock */
+ struct mutex net_lock;
+};
+
+static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf)
+{
+ return intf->ops->rx_desc_read(intf);
+}
+
+static inline void bcmasp_intf_rx_buffer_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->rx_buffer_write(intf, addr);
+}
+
+static inline void bcmasp_intf_rx_desc_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->rx_desc_write(intf, addr);
+}
+
+static inline unsigned long bcmasp_intf_tx_read(struct bcmasp_intf *intf)
+{
+ return intf->ops->tx_read(intf);
+}
+
+static inline void bcmasp_intf_tx_write(struct bcmasp_intf *intf,
+ dma_addr_t addr)
+{
+ intf->ops->tx_write(intf, addr);
+}
+
+#define __BCMASP_IO_MACRO(name, m) \
+static inline u32 name##_rl(struct bcmasp_intf *intf, u32 off) \
+{ \
+ u32 reg = readl_relaxed(intf->m + off); \
+ return reg; \
+} \
+static inline void name##_wl(struct bcmasp_intf *intf, u32 val, u32 off)\
+{ \
+ writel_relaxed(val, intf->m + off); \
+}
+
+#define BCMASP_IO_MACRO(name) __BCMASP_IO_MACRO(name, res.name)
+#define BCMASP_FP_IO_MACRO(name) __BCMASP_IO_MACRO(name, name)
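+
+/* For example, BCMASP_IO_MACRO(umac) below expands to the umac_rl()/umac_wl()
+ * accessors over intf->res.umac, while BCMASP_FP_IO_MACRO(tx_spb_dma)
+ * generates the same pair over intf->tx_spb_dma.
+ */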
+
+BCMASP_IO_MACRO(umac);
+BCMASP_IO_MACRO(umac2fb);
+BCMASP_IO_MACRO(rgmii);
+BCMASP_FP_IO_MACRO(tx_spb_dma);
+BCMASP_IO_MACRO(tx_spb_ctrl);
+BCMASP_IO_MACRO(tx_spb_top);
+BCMASP_IO_MACRO(tx_epkt_core);
+BCMASP_IO_MACRO(tx_pause_ctrl);
+BCMASP_FP_IO_MACRO(rx_edpkt_dma);
+BCMASP_FP_IO_MACRO(rx_edpkt_cfg);
+
+#define __BCMASP_FP_IO_MACRO_Q(name, m) \
+static inline u64 name##_rq(struct bcmasp_intf *intf, u32 off) \
+{ \
+ u64 reg = readq_relaxed(intf->m + off); \
+ return reg; \
+} \
+static inline void name##_wq(struct bcmasp_intf *intf, u64 val, u32 off)\
+{ \
+ writeq_relaxed(val, intf->m + off); \
+}
+
+#define BCMASP_FP_IO_MACRO_Q(name) __BCMASP_FP_IO_MACRO_Q(name, name)
+
+BCMASP_FP_IO_MACRO_Q(tx_spb_dma);
+BCMASP_FP_IO_MACRO_Q(rx_edpkt_dma);
+BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
+
+#define PKT_OFFLOAD_NOP (0 << 28)
+#define PKT_OFFLOAD_HDR_OP (1 << 28)
+#define PKT_OFFLOAD_HDR_WRBACK BIT(19)
+#define PKT_OFFLOAD_HDR_COUNT(x) ((x) << 16)
+#define PKT_OFFLOAD_HDR_SIZE_1(x) ((x) << 4)
+#define PKT_OFFLOAD_HDR_SIZE_2(x) (x)
+#define PKT_OFFLOAD_HDR2_SIZE_2(x) ((x) << 24)
+#define PKT_OFFLOAD_HDR2_SIZE_3(x) ((x) << 12)
+#define PKT_OFFLOAD_HDR2_SIZE_4(x) (x)
+#define PKT_OFFLOAD_EPKT_OP (2 << 28)
+#define PKT_OFFLOAD_EPKT_WRBACK BIT(23)
+#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
+#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
+#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14)
+#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
+#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
+#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
+#define PKT_OFFLOAD_EPKT_BLOC(x) (x)
+#define PKT_OFFLOAD_END_OP (7 << 28)
+
+struct bcmasp_pkt_offload {
+ __be32 nop;
+ __be32 header;
+ __be32 header2;
+ __be32 epkt;
+ __be32 end;
+};
+
+#define BCMASP_CORE_IO_MACRO(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + (offset) + off); \
+}
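+
+/* For example, BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET) below
+ * generates the rx_filter_core_rl()/rx_filter_core_wl() accessors used by
+ * the RX filter code.
+ */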
+
+BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
+BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
+BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
+BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL);
+
+struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
+ struct device_node *ndev_dn, int i);
+
+void bcmasp_interface_destroy(struct bcmasp_intf *intf);
+
+void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
+
+void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+
+void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
+
+extern const struct ethtool_ops bcmasp_ethtool_ops;
+
+int bcmasp_interface_suspend(struct bcmasp_intf *intf);
+
+int bcmasp_interface_resume(struct bcmasp_intf *intf);
+
+void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_broad(struct bcmasp_intf *intf, bool en);
+
+void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr,
+ bool en);
+
+int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
+ unsigned char *mask);
+
+void bcmasp_disable_all_filters(struct bcmasp_intf *intf);
+
+void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en);
+
+struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
+ u32 loc, bool wake_filter,
+ bool init);
+
+bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
+ struct ethtool_rx_flow_spec *fs);
+
+void bcmasp_netfilt_release(struct bcmasp_intf *intf,
+ struct bcmasp_net_filter *nfilt);
+
+int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
+
+void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt);
+
+void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
+
+void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
+#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
new file mode 100644
index 000000000000..c4f1604d5ab3
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "bcmasp_ethtool: " fmt
+
+#include <asm-generic/unaligned.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+enum bcmasp_stat_type {
+ BCMASP_STAT_RX_EDPKT,
+ BCMASP_STAT_RX_CTRL,
+ BCMASP_STAT_RX_CTRL_PER_INTF,
+ BCMASP_STAT_SOFT,
+};
+
+struct bcmasp_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ enum bcmasp_stat_type type;
+ u32 reg_offset;
+};
+
+#define STAT_BCMASP_SOFT_MIB(str) { \
+ .stat_string = str, \
+ .type = BCMASP_STAT_SOFT, \
+}
+
+#define STAT_BCMASP_OFFSET(str, _type, offset) { \
+ .stat_string = str, \
+ .type = _type, \
+ .reg_offset = offset, \
+}
+
+#define STAT_BCMASP_RX_EDPKT(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset)
+#define STAT_BCMASP_RX_CTRL(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
+#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
+ STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL_PER_INTF, offset)
+
+/* Must match the order of struct bcmasp_mib_counters */
+static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
+ /* EDPKT counters */
+ STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER),
+ STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT),
+ STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT),
+ STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT),
+ /* ASP RX control */
+ STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
+ ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Port",
+ ASP_RX_CTRL_FB_0_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL_PER_INTF("RX Buffer FIFO Depth",
+ ASP_RX_CTRL_FB_RX_FIFO_DEPTH),
+ STAT_BCMASP_RX_CTRL("Frames Out(Buffer)",
+ ASP_RX_CTRL_FB_OUT_FRAME_COUNT),
+ STAT_BCMASP_RX_CTRL("Frames Out(Filters)",
+ ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT),
+ /* Software maintained statistics */
+ STAT_BCMASP_SOFT_MIB("RX SKB Alloc Failed"),
+ STAT_BCMASP_SOFT_MIB("TX DMA Failed"),
+ STAT_BCMASP_SOFT_MIB("Multicast Filters Full"),
+ STAT_BCMASP_SOFT_MIB("Unicast Filters Full"),
+ STAT_BCMASP_SOFT_MIB("MDA Filters Combined"),
+ STAT_BCMASP_SOFT_MIB("Promisc Filter Set"),
+ STAT_BCMASP_SOFT_MIB("TX Realloc For Offload Failed"),
+ STAT_BCMASP_SOFT_MIB("Tx Timeout Count"),
+};
+
+#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
+
+static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf,
+ const struct bcmasp_stats *s)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ if (!strcmp("Frames Out(Buffer)", s->stat_string))
+ return priv->hw_info->rx_ctrl_fb_out_frame_count;
+
+ if (!strcmp("Frames Out(Filters)", s->stat_string))
+ return priv->hw_info->rx_ctrl_fb_filt_out_frame_count;
+
+ if (!strcmp("RX Buffer FIFO Depth", s->stat_string))
+ return priv->hw_info->rx_ctrl_fb_rx_fifo_depth;
+
+ return s->reg_offset;
+}
+
+static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCMASP_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcmasp_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcmasp_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ default:
+ return;
+ }
+}
+
+static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
+{
+ unsigned int i;
+
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ const struct bcmasp_stats *s;
+ u32 offset, val;
+ char *p;
+
+ s = &bcmasp_gstrings_stats[i];
+ offset = bcmasp_stat_fixup_offset(intf, s);
+ switch (s->type) {
+ case BCMASP_STAT_SOFT:
+ continue;
+ case BCMASP_STAT_RX_EDPKT:
+ val = rx_edpkt_core_rl(intf->parent, offset);
+ break;
+ case BCMASP_STAT_RX_CTRL:
+ val = rx_ctrl_core_rl(intf->parent, offset);
+ break;
+ case BCMASP_STAT_RX_CTRL_PER_INTF:
+ offset += sizeof(u32) * intf->port;
+ val = rx_ctrl_core_rl(intf->parent, offset);
+ break;
+ default:
+ continue;
+ }
+ p = (char *)(&intf->mib) + (i * sizeof(u32));
+ put_unaligned(val, (u32 *)p);
+ }
+}
+
+static void bcmasp_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ unsigned int i;
+ char *p;
+
+ if (netif_running(dev))
+ bcmasp_update_mib_counters(intf);
+
+ for (i = 0; i < BCMASP_STATS_LEN; i++) {
+ p = (char *)(&intf->mib) + (i * sizeof(u32));
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcmasp_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "bcmasp", sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static u32 bcmasp_get_msglevel(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ return intf->msg_enable;
+}
+
+static void bcmasp_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ intf->msg_enable = level;
+}
+
+#define BCMASP_SUPPORTED_WAKE (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER)
+static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ wol->supported = BCMASP_SUPPORTED_WAKE;
+ wol->wolopts = intf->wolopts;
+ memset(wol->sopass, 0, sizeof(wol->sopass));
+
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass));
+}
+
+static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_priv *priv = intf->parent;
+ struct device *kdev = &priv->pdev->dev;
+
+ if (!device_can_wakeup(kdev))
+ return -EOPNOTSUPP;
+
+ /* Interface Specific */
+ intf->wolopts = wol->wolopts;
+ if (intf->wolopts & WAKE_MAGICSECURE)
+ memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
+
+ mutex_lock(&priv->wol_lock);
+ priv->enable_wol(intf, !!intf->wolopts);
+ mutex_unlock(&priv->wol_lock);
+
+ return 0;
+}
+
+static int bcmasp_flow_insert(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_net_filter *nfilter;
+ u32 loc = cmd->fs.location;
+ bool wake = false;
+
+ if (cmd->fs.ring_cookie == RX_CLS_FLOW_WAKE)
+ wake = true;
+
+ /* Currently only supports WAKE filters */
+ if (!wake)
+ return -EOPNOTSUPP;
+
+ switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if filter already exists */
+ if (bcmasp_netfilt_check_dup(intf, &cmd->fs))
+ return -EINVAL;
+
+ nfilter = bcmasp_netfilt_get_init(intf, loc, wake, true);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ /* Return the location where we did insert the filter */
+ cmd->fs.location = nfilter->hw_index;
+ memcpy(&nfilter->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec));
+
+ /* Since we only support wake filters, defer register programming till
+ * suspend time.
+ */
+ return 0;
+}
+
+static int bcmasp_flow_delete(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_net_filter *nfilter;
+
+ nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ bcmasp_netfilt_release(intf, nfilter);
+
+ return 0;
+}
+
+static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_net_filter *nfilter;
+
+ nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false);
+ if (IS_ERR(nfilter))
+ return PTR_ERR(nfilter);
+
+ memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
+
+ cmd->data = NUM_NET_FILTERS;
+
+ return 0;
+}
+
+static int bcmasp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&intf->parent->net_lock);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcmasp_flow_insert(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcmasp_flow_delete(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&intf->parent->net_lock);
+
+ return ret;
+}
+
+static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&intf->parent->net_lock);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bcmasp_netfilt_get_active(intf);
+ /* We support specifying rule locations */
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = bcmasp_flow_get(intf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
+ cmd->data = NUM_NET_FILTERS;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&intf->parent->net_lock);
+
+ return err;
+}
+
+void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (enable)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
+
+ intf->eee.eee_enabled = enable;
+ intf->eee.eee_active = enable;
+}
+
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct ethtool_eee *p = &intf->eee;
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ e->eee_enabled = p->eee_enabled;
+ e->eee_active = p->eee_active;
+ e->tx_lpi_enabled = p->tx_lpi_enabled;
+ e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
+
+ return phy_ethtool_get_eee(dev->phydev, e);
+}
+
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct ethtool_eee *p = &intf->eee;
+ int ret;
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ if (!p->eee_enabled) {
+ bcmasp_eee_enable_set(intf, false);
+ } else {
+ ret = phy_init_eee(dev->phydev, 0);
+ if (ret) {
+ netif_err(intf, hw, dev,
+ "EEE initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ intf->eee.eee_active = ret >= 0;
+ intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
+ bcmasp_eee_enable_set(intf, true);
+ }
+
+ return phy_ethtool_set_eee(dev->phydev, e);
+}
+
+static void bcmasp_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ mac_stats->FramesTransmittedOK = umac_rl(intf, UMC_GTPOK);
+ mac_stats->SingleCollisionFrames = umac_rl(intf, UMC_GTSCL);
+ mac_stats->MultipleCollisionFrames = umac_rl(intf, UMC_GTMCL);
+ mac_stats->FramesReceivedOK = umac_rl(intf, UMC_GRPOK);
+ mac_stats->FrameCheckSequenceErrors = umac_rl(intf, UMC_GRFCS);
+ mac_stats->AlignmentErrors = umac_rl(intf, UMC_GRALN);
+ mac_stats->OctetsTransmittedOK = umac_rl(intf, UMC_GTBYT);
+ mac_stats->FramesWithDeferredXmissions = umac_rl(intf, UMC_GTDRF);
+ mac_stats->LateCollisions = umac_rl(intf, UMC_GTLCL);
+ mac_stats->FramesAbortedDueToXSColls = umac_rl(intf, UMC_GTXCL);
+ mac_stats->OctetsReceivedOK = umac_rl(intf, UMC_GRBYT);
+ mac_stats->MulticastFramesXmittedOK = umac_rl(intf, UMC_GTMCA);
+ mac_stats->BroadcastFramesXmittedOK = umac_rl(intf, UMC_GTBCA);
+ mac_stats->FramesWithExcessiveDeferral = umac_rl(intf, UMC_GTEDF);
+ mac_stats->MulticastFramesReceivedOK = umac_rl(intf, UMC_GRMCA);
+ mac_stats->BroadcastFramesReceivedOK = umac_rl(intf, UMC_GRBCA);
+}
+
+static const struct ethtool_rmon_hist_range bcmasp_rmon_ranges[] = {
+ { 0, 64},
+ { 65, 127},
+ { 128, 255},
+ { 256, 511},
+ { 512, 1023},
+ { 1024, 1518},
+ { 1519, 1522},
+ {}
+};
+
+static void bcmasp_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ *ranges = bcmasp_rmon_ranges;
+
+ rmon_stats->undersize_pkts = umac_rl(intf, UMC_RRUND);
+ rmon_stats->oversize_pkts = umac_rl(intf, UMC_GROVR);
+ rmon_stats->fragments = umac_rl(intf, UMC_RRFRG);
+ rmon_stats->jabbers = umac_rl(intf, UMC_GRJBR);
+
+ rmon_stats->hist[0] = umac_rl(intf, UMC_GR64);
+ rmon_stats->hist[1] = umac_rl(intf, UMC_GR127);
+ rmon_stats->hist[2] = umac_rl(intf, UMC_GR255);
+ rmon_stats->hist[3] = umac_rl(intf, UMC_GR511);
+ rmon_stats->hist[4] = umac_rl(intf, UMC_GR1023);
+ rmon_stats->hist[5] = umac_rl(intf, UMC_GR1518);
+ rmon_stats->hist[6] = umac_rl(intf, UMC_GRMGV);
+
+ rmon_stats->hist_tx[0] = umac_rl(intf, UMC_TR64);
+ rmon_stats->hist_tx[1] = umac_rl(intf, UMC_TR127);
+ rmon_stats->hist_tx[2] = umac_rl(intf, UMC_TR255);
+ rmon_stats->hist_tx[3] = umac_rl(intf, UMC_TR511);
+ rmon_stats->hist_tx[4] = umac_rl(intf, UMC_TR1023);
+ rmon_stats->hist_tx[5] = umac_rl(intf, UMC_TR1518);
+ rmon_stats->hist_tx[6] = umac_rl(intf, UMC_TRMGV);
+}
+
+static void bcmasp_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ ctrl_stats->MACControlFramesTransmitted = umac_rl(intf, UMC_GTXCF);
+ ctrl_stats->MACControlFramesReceived = umac_rl(intf, UMC_GRXCF);
+ ctrl_stats->UnsupportedOpcodesReceived = umac_rl(intf, UMC_GRXUO);
+}
+
+const struct ethtool_ops bcmasp_ethtool_ops = {
+ .get_drvinfo = bcmasp_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = bcmasp_get_msglevel,
+ .set_msglevel = bcmasp_set_msglevel,
+ .get_wol = bcmasp_get_wol,
+ .set_wol = bcmasp_set_wol,
+ .get_rxnfc = bcmasp_get_rxnfc,
+ .set_rxnfc = bcmasp_set_rxnfc,
+ .set_eee = bcmasp_set_eee,
+ .get_eee = bcmasp_get_eee,
+ .get_eth_mac_stats = bcmasp_get_eth_mac_stats,
+ .get_rmon_stats = bcmasp_get_rmon_stats,
+ .get_eth_ctrl_stats = bcmasp_get_eth_ctrl_stats,
+ .get_strings = bcmasp_get_strings,
+ .get_ethtool_stats = bcmasp_get_ethtool_stats,
+ .get_sset_count = bcmasp_get_sset_count,
+};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
new file mode 100644
index 000000000000..53e542881255
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "bcmasp_intf: " fmt
+
+#include <asm/byteorder.h>
+#include <linux/brcmphy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/ptp_classify.h>
+#include <linux/platform_device.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+static int incr_ring(int index, int ring_count)
+{
+ index++;
+ if (index == ring_count)
+ return 0;
+
+ return index;
+}
+
+/* Points to last byte of descriptor */
+static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
+ int ring_count)
+{
+ dma_addr_t end = beg + (ring_count * DESC_SIZE);
+
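+ /* Advance one descriptor; when the pointer passes the end of the
+ * ring, wrap back to the last byte of the first descriptor.
+ */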
+ addr += DESC_SIZE;
+ if (addr > end)
+ return beg + DESC_SIZE - 1;
+
+ return addr;
+}
+
+/* Points to first byte of descriptor */
+static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
+ int ring_count)
+{
+ dma_addr_t end = beg + (ring_count * DESC_SIZE);
+
+ addr += DESC_SIZE;
+ if (addr >= end)
+ return beg;
+
+ return addr;
+}
+
+static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
+{
+ if (en) {
+ tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
+ TX_EPKT_C_CFG_MISC_PT |
+ (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
+ TX_EPKT_C_CFG_MISC);
+ } else {
+ tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
+ }
+}
+
+static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
+{
+ if (en)
+ rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
+ RX_EDPKT_CFG_ENABLE);
+ else
+ rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
+}
+
+static void bcmasp_set_rx_mode(struct net_device *dev)
+{
+ unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int ret;
+
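+ /* Program perfect (MDA) filters for the unicast and multicast lists;
+ * fall back to promiscuous mode if the shared filter table runs out
+ * of entries.
+ */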
+ spin_lock_bh(&intf->parent->mda_lock);
+
+ bcmasp_disable_all_filters(intf);
+
+ if (dev->flags & IFF_PROMISC)
+ goto set_promisc;
+
+ bcmasp_set_promisc(intf, 0);
+
+ bcmasp_set_broad(intf, 1);
+
+ bcmasp_set_oaddr(intf, dev->dev_addr, 1);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ bcmasp_set_allmulti(intf, 1);
+ } else {
+ bcmasp_set_allmulti(intf, 0);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
+ if (ret) {
+ intf->mib.mc_filters_full_cnt++;
+ goto set_promisc;
+ }
+ }
+ }
+
+ netdev_for_each_uc_addr(ha, dev) {
+ ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
+ if (ret) {
+ intf->mib.uc_filters_full_cnt++;
+ goto set_promisc;
+ }
+ }
+
+ spin_unlock_bh(&intf->parent->mda_lock);
+ return;
+
+set_promisc:
+ bcmasp_set_promisc(intf, 1);
+ intf->mib.promisc_filters_cnt++;
+
+ /* Disable all filters used by this port */
+ bcmasp_disable_all_filters(intf);
+
+ spin_unlock_bh(&intf->parent->mda_lock);
+}
+
+static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
+{
+ struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];
+
+ txcb->skb = NULL;
+ dma_unmap_addr_set(txcb, dma_addr, 0);
+ dma_unmap_len_set(txcb, dma_len, 0);
+ txcb->last = false;
+}
+
+static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
+{
+ int next_index, i;
+
+ /* Check if we have enough room for cnt descriptors */
+ for (i = 0; i < cnt; i++) {
+ next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
+ if (next_index == intf->tx_spb_clean_index)
+ return 1;
+ }
+
+ return 0;
+}
+
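+/* Build the per-packet offload block (NOP, header-size words, EPKT checksum
+ * command, END) and push it into the skb headroom so the TX engine can
+ * insert the checksums in hardware. Falls back to software checksumming if
+ * the headroom cannot be expanded or the protocol is not handled.
+ */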
+static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
+ struct sk_buff *skb,
+ bool *csum_hw)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ u32 header = 0, header2 = 0, epkt = 0;
+ struct bcmasp_pkt_offload *offload;
+ unsigned int header_cnt = 0;
+ u8 ip_proto;
+ int ret;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return skb;
+
+ ret = skb_cow_head(skb, sizeof(*offload));
+ if (ret < 0) {
+ intf->mib.tx_realloc_offload_failed++;
+ goto help;
+ }
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
+ epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ ip_proto = ip_hdr(skb)->protocol;
+ header_cnt += 2;
+ break;
+ case htons(ETH_P_IPV6):
+ header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
+ epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ header_cnt += 2;
+ break;
+ default:
+ goto help;
+ }
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ header_cnt++;
+ break;
+ case IPPROTO_UDP:
+ header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ header_cnt++;
+ break;
+ default:
+ goto help;
+ }
+
+ offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));
+
+ header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
+ PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
+ epkt |= PKT_OFFLOAD_EPKT_OP;
+
+ offload->nop = htonl(PKT_OFFLOAD_NOP);
+ offload->header = htonl(header);
+ offload->header2 = htonl(header2);
+ offload->epkt = htonl(epkt);
+ offload->end = htonl(PKT_OFFLOAD_END_OP);
+ *csum_hw = true;
+
+ return skb;
+
+help:
+ skb_checksum_help(skb);
+
+ return skb;
+}
+
+static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
+{
+ return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
+}
+
+static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
+}
+
+static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
+}
+
+static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
+{
+ return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
+}
+
+static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
+{
+ tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
+}
+
+static const struct bcmasp_intf_ops bcmasp_intf_ops = {
+ .rx_desc_read = bcmasp_rx_edpkt_dma_rq,
+ .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
+ .rx_desc_write = bcmasp_rx_edpkt_dma_wq,
+ .tx_read = bcmasp_tx_spb_dma_rq,
+ .tx_write = bcmasp_tx_spb_dma_wq,
+};
+
+static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ unsigned int total_bytes, size;
+ int spb_index, nr_frags, i, j;
+ struct bcmasp_tx_cb *txcb;
+ dma_addr_t mapping, valid;
+ struct bcmasp_desc *desc;
+ bool csum_hw = false;
+ struct device *kdev;
+ skb_frag_t *frag;
+
+ kdev = &intf->parent->pdev->dev;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (tx_spb_ring_full(intf, nr_frags + 1)) {
+ netif_stop_queue(dev);
+ if (net_ratelimit())
+ netdev_err(dev, "Tx Ring Full!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Save skb len before adding csum offload header */
+ total_bytes = skb->len;
+ skb = bcmasp_csum_offload(dev, skb, &csum_hw);
+ if (!skb)
+ return NETDEV_TX_OK;
+
+ spb_index = intf->tx_spb_index;
+ valid = intf->tx_spb_dma_valid;
+ for (i = 0; i <= nr_frags; i++) {
+ if (!i) {
+ size = skb_headlen(skb);
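+ /* Pad short, non-fragmented frames up to the minimum frame
+ * size (including FCS) before mapping.
+ */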
+ if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
+ if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
+ return NETDEV_TX_OK;
+ size = skb->len;
+ }
+ mapping = dma_map_single(kdev, skb->data, size,
+ DMA_TO_DEVICE);
+ } else {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(kdev, frag, 0, size,
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_mapping_error(kdev, mapping)) {
+ intf->mib.tx_dma_failed++;
+ spb_index = intf->tx_spb_index;
+ for (j = 0; j < i; j++) {
+ bcmasp_clean_txcb(intf, spb_index);
+ spb_index = incr_ring(spb_index,
+ DESC_RING_COUNT);
+ }
+ /* Rewind so we do not have a hole */
+ spb_index = intf->tx_spb_index;
+ return NETDEV_TX_OK;
+ }
+
+ txcb = &intf->tx_cbs[spb_index];
+ desc = &intf->tx_spb_cpu[spb_index];
+ memset(desc, 0, sizeof(*desc));
+ txcb->skb = skb;
+ txcb->bytes_sent = total_bytes;
+ dma_unmap_addr_set(txcb, dma_addr, mapping);
+ dma_unmap_len_set(txcb, dma_len, size);
+ if (!i) {
+ desc->flags |= DESC_SOF;
+ if (csum_hw)
+ desc->flags |= DESC_EPKT_CMD;
+ }
+
+ if (i == nr_frags) {
+ desc->flags |= DESC_EOF;
+ txcb->last = true;
+ }
+
+ desc->buf = mapping;
+ desc->size = size;
+ desc->flags |= DESC_INT_EN;
+
+ netif_dbg(intf, tx_queued, dev,
+ "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
+ __func__, &mapping, desc->size, desc->flags,
+ spb_index);
+
+ spb_index = incr_ring(spb_index, DESC_RING_COUNT);
+ valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
+ DESC_RING_COUNT);
+ }
+
+ /* Ensure all descriptors have been written to DRAM for the
+ * hardware to see up-to-date contents.
+ */
+ wmb();
+
+ intf->tx_spb_index = spb_index;
+ intf->tx_spb_dma_valid = valid;
+ bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
+
+ if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static void bcmasp_netif_start(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ bcmasp_set_rx_mode(dev);
+ napi_enable(&intf->tx_napi);
+ napi_enable(&intf->rx_napi);
+
+ bcmasp_enable_rx_irq(intf, 1);
+ bcmasp_enable_tx_irq(intf, 1);
+
+ phy_start(dev->phydev);
+}
+
+static void umac_reset(struct bcmasp_intf *intf)
+{
+ umac_wl(intf, 0x0, UMC_CMD);
+ umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
+ usleep_range(10, 100);
+ umac_wl(intf, 0x0, UMC_CMD);
+}
+
+static void umac_set_hw_addr(struct bcmasp_intf *intf,
+ const unsigned char *addr)
+{
+ u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+ addr[3];
+ u32 mac1 = (addr[4] << 8) | addr[5];
+
+ umac_wl(intf, mac0, UMC_MAC0);
+ umac_wl(intf, mac1, UMC_MAC1);
+}
+
+static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_CMD);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ umac_wl(intf, reg, UMC_CMD);
+
+ /* UniMAC stops on a packet boundary; wait long enough for a
+ * full-sized packet to be processed (1 msec).
+ */
+ if (enable == 0)
+ usleep_range(1000, 2000);
+}
+
+static void umac_init(struct bcmasp_intf *intf)
+{
+ umac_wl(intf, 0x800, UMC_FRM_LEN);
+ umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
+ umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
+ umac_enable_set(intf, UMC_CMD_PROMISC, 1);
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ struct bcmasp_intf_stats64 *stats = &intf->stats64;
+ struct device *kdev = &intf->parent->pdev->dev;
+ unsigned long read, released = 0;
+ struct bcmasp_tx_cb *txcb;
+ struct bcmasp_desc *desc;
+ dma_addr_t mapping;
+
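+ /* Reclaim descriptors the hardware has consumed, up to its current
+ * read pointer: unmap each buffer and free the skb on the last
+ * fragment.
+ */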
+ read = bcmasp_intf_tx_read(intf);
+ while (intf->tx_spb_dma_read != read) {
+ txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
+ mapping = dma_unmap_addr(txcb, dma_addr);
+
+ dma_unmap_single(kdev, mapping,
+ dma_unmap_len(txcb, dma_len),
+ DMA_TO_DEVICE);
+
+ if (txcb->last) {
+ dev_consume_skb_any(txcb->skb);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];
+
+ netif_dbg(intf, tx_done, intf->ndev,
+ "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
+ __func__, &mapping, desc->size, desc->flags,
+ intf->tx_spb_clean_index);
+
+ bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
+ released++;
+
+ intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
+ DESC_RING_COUNT);
+ intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
+ intf->tx_spb_dma_addr,
+ DESC_RING_COUNT);
+ }
+
+ /* Ensure all descriptors have been written to DRAM for the hardware
+ * to see updated contents.
+ */
+ wmb();
+
+ napi_complete(&intf->tx_napi);
+
+ bcmasp_enable_tx_irq(intf, 1);
+
+ if (released)
+ netif_wake_queue(intf->ndev);
+
+ return 0;
+}
+
+static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, rx_napi);
+ struct bcmasp_intf_stats64 *stats = &intf->stats64;
+ struct device *kdev = &intf->parent->pdev->dev;
+ unsigned long processed = 0;
+ struct bcmasp_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t valid;
+ void *data;
+ u64 flags;
+ u32 len;
+
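+ /* The VALID pointer references the last byte of the most recently
+ * completed descriptor; add one to get the first byte of the next
+ * descriptor and wrap at the end of the ring.
+ */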
+ valid = bcmasp_intf_rx_desc_read(intf) + 1;
+ if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
+ valid = intf->rx_edpkt_dma_addr;
+
+ while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
+ desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];
+
+ /* Ensure the descriptor has been fully written to DRAM by the
+ * hardware before the CPU reads it.
+ */
+ rmb();
+
+ /* Calculate virt addr by offsetting from physical addr */
+ data = intf->rx_ring_cpu +
+ (DESC_ADDR(desc->buf) - intf->rx_ring_dma);
+
+ flags = DESC_FLAGS(desc->buf);
+ if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
+ if (net_ratelimit()) {
+ netif_err(intf, rx_status, intf->ndev,
+ "flags=0x%llx\n", flags);
+ }
+
+ u64_stats_update_begin(&stats->syncp);
+ if (flags & DESC_CRC_ERR)
+ u64_stats_inc(&stats->rx_crc_errs);
+ if (flags & DESC_RX_SYM_ERR)
+ u64_stats_inc(&stats->rx_sym_errs);
+ u64_stats_update_end(&stats->syncp);
+
+ goto next;
+ }
+
+ dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
+ DMA_FROM_DEVICE);
+
+ len = desc->size;
+
+ skb = napi_alloc_skb(napi, len);
+ if (!skb) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_dropped);
+ u64_stats_update_end(&stats->syncp);
+ intf->mib.alloc_rx_skb_failed++;
+
+ goto next;
+ }
+
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+
+ skb_pull(skb, 2);
+ len -= 2;
+ if (likely(intf->crc_fwd)) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ if ((intf->ndev->features & NETIF_F_RXCSUM) &&
+ (desc->buf & DESC_CHKSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb->protocol = eth_type_trans(skb, intf->ndev);
+
+ napi_gro_receive(napi, skb);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, len);
+ u64_stats_update_end(&stats->syncp);
+
+next:
+ bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
+ desc->size));
+
+ processed++;
+ intf->rx_edpkt_dma_read =
+ incr_first_byte(intf->rx_edpkt_dma_read,
+ intf->rx_edpkt_dma_addr,
+ DESC_RING_COUNT);
+ intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
+ DESC_RING_COUNT);
+ }
+
+ bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
+
+ if (processed < budget) {
+ napi_complete_done(&intf->rx_napi, processed);
+ bcmasp_enable_rx_irq(intf, 1);
+ }
+
+ return processed;
+}
+
+static void bcmasp_adj_link(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ u32 cmd_bits = 0, reg;
+ int changed = 0;
+
+ if (intf->old_link != phydev->link) {
+ changed = 1;
+ intf->old_link = phydev->link;
+ }
+
+ if (intf->old_duplex != phydev->duplex) {
+ changed = 1;
+ intf->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = UMC_CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = UMC_CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = UMC_CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = UMC_CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= UMC_CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= UMC_CMD_HD_EN;
+
+ if (intf->old_pause != phydev->pause) {
+ changed = 1;
+ intf->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;
+
+ if (!changed)
+ return;
+
+ if (phydev->link) {
+ reg = umac_rl(intf, UMC_CMD);
+ reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
+ UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
+ UMC_CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ umac_wl(intf, reg, UMC_CMD);
+
+ intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ bcmasp_eee_enable_set(intf, intf->eee.eee_active);
+ }
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ if (phydev->link)
+ reg |= RGMII_LINK;
+ else
+ reg &= ~RGMII_LINK;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+
+ if (changed)
+ phy_print_status(phydev);
+}
+
+static int bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ struct page *buffer_pg;
+ dma_addr_t dma;
+ void *p;
+ u32 reg;
+ int ret;
+
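+ /* The RX path uses one large contiguous buffer that the hardware
+ * copies packets into, plus a coherent descriptor ring pointing
+ * into it; both are allocated and programmed below.
+ */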
+ intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
+ buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
+ if (!buffer_pg)
+ return -ENOMEM;
+
+ dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, dma)) {
+ __free_pages(buffer_pg, intf->rx_buf_order);
+ return -ENOMEM;
+ }
+ intf->rx_ring_cpu = page_to_virt(buffer_pg);
+ intf->rx_ring_dma = dma;
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+
+ p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+ GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto free_rx_ring;
+ }
+ intf->rx_edpkt_cpu = p;
+
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+
+ intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
+ intf->rx_edpkt_index = 0;
+
+ /* Make sure channels are disabled */
+ rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
+
+ /* Rx SPB */
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
+ RX_EDPKT_RING_BUFFER_END);
+ rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
+ RX_EDPKT_RING_BUFFER_VALID);
+
+ /* EDPKT */
+ rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
+ RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
+ (RX_EDPKT_CFG_CFG0_64_ALN <<
+ RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
+ (RX_EDPKT_CFG_CFG0_EFRM_STUF),
+ RX_EDPKT_CFG_CFG0);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
+ RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
+ RX_EDPKT_DMA_VALID);
+
+ reg = UMAC2FB_CFG_DEFAULT_EN |
+ ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
+ reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
+ umac2fb_wl(intf, reg, UMAC2FB_CFG);
+
+ return 0;
+
+free_rx_ring:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+ return ret;
+}
+
+static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+}
+
+static int bcmasp_init_tx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ void *p;
+ int ret;
+
+ p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
+ GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ intf->tx_spb_cpu = p;
+ intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
+ intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
+
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
+ GFP_KERNEL);
+ if (!intf->tx_cbs) {
+ ret = -ENOMEM;
+ goto free_tx_spb;
+ }
+
+ intf->tx_spb_index = 0;
+ intf->tx_spb_clean_index = 0;
+
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
+
+ /* Make sure channels are disabled */
+ tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
+ tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
+
+ /* Tx SPB */
+ tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
+ TX_SPB_CTRL_XF_CTRL2);
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
+ tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
+ tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
+
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
+ tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
+
+ return 0;
+
+free_tx_spb:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+
+ return ret;
+}
+
+static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* Free descriptors */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+
+ /* Free cbs */
+ kfree(intf->tx_cbs);
+}
+
+static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
+ RGMII_EPHY_CFG_IDDQ_GLOBAL;
+ u32 reg;
+
+ reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
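+ /* Power-up ungates the 25MHz clock, clears the IDDQ/power-down bits
+ * and pulses the reset; power-down asserts those bits plus reset and
+ * then gates the clock again. Each step gets ~1ms to settle.
+ */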
+ if (enable) {
+ reg &= ~RGMII_EPHY_CK25_DIS;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ reg &= ~mask;
+ reg |= RGMII_EPHY_RESET;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ reg &= ~RGMII_EPHY_RESET;
+ } else {
+ reg |= mask | RGMII_EPHY_RESET;
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+ reg |= RGMII_EPHY_CK25_DIS;
+ }
+ rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
+ mdelay(1);
+
+ /* Set or clear the LED control override so the LEDs do not stay lit
+ * and draw unnecessary current while the EPHY is powered off.
+ */
+ reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
+ if (enable)
+ reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
+ else
+ reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
+ rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
+}
+
+static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
+{
+ u32 reg;
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ reg &= ~RGMII_OOB_DIS;
+ if (enable)
+ reg |= RGMII_MODE_EN;
+ else
+ reg &= ~RGMII_MODE_EN;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+}
+
+static void bcmasp_netif_deinit(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ u32 reg, timeout = 1000;
+
+ napi_disable(&intf->tx_napi);
+
+ bcmasp_enable_tx(intf, 0);
+
+ /* Flush any TX packets in the pipe */
+ tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
+ do {
+ reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
+ if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
+ break;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+ tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+
+ umac_enable_set(intf, UMC_CMD_TX_EN, 0);
+
+ phy_stop(dev->phydev);
+
+ umac_enable_set(intf, UMC_CMD_RX_EN, 0);
+
+ bcmasp_flush_rx_port(intf);
+ usleep_range(1000, 2000);
+ bcmasp_enable_rx(intf, 0);
+
+ napi_disable(&intf->rx_napi);
+
+ /* Disable interrupts */
+ bcmasp_enable_tx_irq(intf, 0);
+ bcmasp_enable_rx_irq(intf, 0);
+
+ netif_napi_del(&intf->tx_napi);
+ bcmasp_reclaim_free_all_tx(intf);
+
+ netif_napi_del(&intf->rx_napi);
+ bcmasp_reclaim_free_all_rx(intf);
+}
+
+static int bcmasp_stop(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ netif_dbg(intf, ifdown, dev, "bcmasp stop\n");
+
+ /* Stop tx from updating HW */
+ netif_tx_disable(dev);
+
+ bcmasp_netif_deinit(dev);
+
+ phy_disconnect(dev->phydev);
+
+ /* Disable internal EPHY or external PHY */
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+
+ /* Disable the interface clocks */
+ bcmasp_core_clock_set_intf(intf, false);
+
+ clk_disable_unprepare(intf->parent->clk);
+
+ return 0;
+}
+
+static void bcmasp_configure_port(struct bcmasp_intf *intf)
+{
+ u32 reg, id_mode_dis = 0;
+
+ reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
+ reg &= ~RGMII_PORT_MODE_MASK;
+
+ switch (intf->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII_NO_ID: TXC transitions at the same time as TXD
+ * (requires PCB or receiver-side delay)
+ * RGMII: Add 2ns delay on TXC (90 degree shift)
+ *
+ * ID is implicitly disabled for 100Mbps (RG)MII operation.
+ */
+ id_mode_dis = RGMII_ID_MODE_DIS;
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= RGMII_PORT_MODE_EXT_GPHY;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ reg |= RGMII_PORT_MODE_EXT_EPHY;
+ break;
+ default:
+ break;
+ }
+
+ if (intf->internal_phy)
+ reg |= RGMII_PORT_MODE_EPHY;
+
+ rgmii_wl(intf, reg, RGMII_PORT_CNTRL);
+
+ reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
+ reg &= ~RGMII_ID_MODE_DIS;
+ reg |= id_mode_dis;
+ rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
+}
+
+static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ phy_interface_t phy_iface = intf->phy_interface;
+ u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
+ struct phy_device *phydev = NULL;
+ int ret;
+
+ /* Always enable interface clocks */
+ bcmasp_core_clock_set_intf(intf, true);
+
+ /* Enable internal PHY or external PHY before any MAC activity */
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, true);
+ else
+ bcmasp_rgmii_mode_en_set(intf, true);
+ bcmasp_configure_port(intf);
+
+ /* This is an ugly quirk but we have not been correctly
+ * interpreting the phy_interface values and we have done that
+ * across different drivers, so at least we are consistent in
+ * our mistakes.
+ *
+ * When the Generic PHY driver is in use either the PHY has
+ * been strapped or programmed correctly by the boot loader so
+ * we should stick to our incorrect interpretation since we
+ * have validated it.
+ *
+ * Now when a dedicated PHY driver is in use, we need to
+ * reverse the meaning of the phy_interface_mode values to
+ * something that the PHY driver will interpret and act on such
+ * that we have two mistakes canceling themselves so to speak.
+ * We only do this for the two modes that GENET driver
+ * officially supports on Broadcom STB chips:
+ * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
+ * Other modes are not *officially* supported with the boot
+ * loader and the scripted environment generating Device Tree
+ * blobs for those platforms.
+ *
+ * Note that internal PHY and fixed-link configurations are not
+ * affected because they use different phy_interface_t values
+ * or the Generic PHY driver.
+ */
+ switch (phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ default:
+ break;
+ }
+
+ if (phy_connect) {
+ phydev = of_phy_connect(dev, intf->phy_dn,
+ bcmasp_adj_link, phy_flags,
+ phy_iface);
+ if (!phydev) {
+ ret = -ENODEV;
+ netdev_err(dev, "could not attach to PHY\n");
+ goto err_phy_disable;
+ }
+ } else if (!intf->wolopts) {
+ ret = phy_resume(dev->phydev);
+ if (ret)
+ goto err_phy_disable;
+ }
+
+ umac_reset(intf);
+
+ umac_init(intf);
+
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
+
+ umac_set_hw_addr(intf, dev->dev_addr);
+
+ intf->old_duplex = -1;
+ intf->old_link = -1;
+ intf->old_pause = -1;
+
+ ret = bcmasp_init_tx(intf);
+ if (ret)
+ goto err_phy_disconnect;
+
+ /* Turn on asp */
+ bcmasp_enable_tx(intf, 1);
+
+ ret = bcmasp_init_rx(intf);
+ if (ret)
+ goto err_reclaim_tx;
+
+ bcmasp_enable_rx(intf, 1);
+
+ /* Turn on UniMAC TX/RX */
+ umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
+
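+ /* Cache whether the MAC forwards the FCS so the RX path knows
+ * whether to trim it from received frames.
+ */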
+ intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
+
+ bcmasp_netif_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err_reclaim_tx:
+ bcmasp_reclaim_free_all_tx(intf);
+err_phy_disconnect:
+ if (phydev)
+ phy_disconnect(phydev);
+err_phy_disable:
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+ return ret;
+}
+
+static int bcmasp_open(struct net_device *dev)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ int ret;
+
+ netif_dbg(intf, ifup, dev, "bcmasp open\n");
+
+ ret = clk_prepare_enable(intf->parent->clk);
+ if (ret)
+ return ret;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret)
+ clk_disable_unprepare(intf->parent->clk);
+
+ return ret;
+}
+
+static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
+ intf->mib.tx_timeout_cnt++;
+}
+
+static int bcmasp_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+
+ if (snprintf(name, len, "p%d", intf->port) >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void bcmasp_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct bcmasp_intf *intf = netdev_priv(dev);
+ struct bcmasp_intf_stats64 *lstats;
+ unsigned int start;
+
+ lstats = &intf->stats64;
+
+ do {
+ start = u64_stats_fetch_begin(&lstats->syncp);
+ stats->rx_packets = u64_stats_read(&lstats->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
+ stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
+ stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
+ stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
+ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
+
+ stats->tx_packets = u64_stats_read(&lstats->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
+ } while (u64_stats_fetch_retry(&lstats->syncp, start));
+}
+
+static const struct net_device_ops bcmasp_netdev_ops = {
+ .ndo_open = bcmasp_open,
+ .ndo_stop = bcmasp_stop,
+ .ndo_start_xmit = bcmasp_xmit,
+ .ndo_tx_timeout = bcmasp_tx_timeout,
+ .ndo_set_rx_mode = bcmasp_set_rx_mode,
+ .ndo_get_phys_port_name = bcmasp_get_phys_port_name,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats64 = bcmasp_get_stats64,
+};
+
+static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
+{
+ /* Per port */
+ intf->res.umac = priv->base + UMC_OFFSET(intf);
+ intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
+ (intf->port * 0x4));
+ intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
+
+ /* Per channel */
+ intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
+ intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
+ intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
+ intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
+ intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);
+
+ intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
+ intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
+}
+
+#define MAX_IRQ_STR_LEN 64
+struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
+ struct device_node *ndev_dn, int i)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct bcmasp_intf *intf;
+ struct net_device *ndev;
+ int ch, port, ret;
+
+ if (of_property_read_u32(ndev_dn, "reg", &port)) {
+ dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
+ goto err;
+ }
+
+ if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
+ dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
+ goto err;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
+ if (!ndev) {
+ dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
+ goto err;
+ }
+ intf = netdev_priv(ndev);
+
+ intf->parent = priv;
+ intf->ndev = ndev;
+ intf->channel = ch;
+ intf->port = port;
+ intf->ndev_dn = ndev_dn;
+ intf->index = i;
+
+ ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
+ if (ret < 0) {
+ dev_err(dev, "invalid PHY mode property\n");
+ goto err_free_netdev;
+ }
+
+ if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ intf->internal_phy = true;
+
+ intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
+ if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
+ ret = of_phy_register_fixed_link(ndev_dn);
+ if (ret) {
+ dev_warn(dev, "%s: failed to register fixed PHY\n",
+ ndev_dn->name);
+ goto err_free_netdev;
+ }
+ intf->phy_dn = ndev_dn;
+ }
+
+ /* Map resource */
+ bcmasp_map_res(priv, intf);
+
+ if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
+ intf->phy_interface != PHY_INTERFACE_MODE_MII &&
+ intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
+ (intf->port != 1 && intf->internal_phy)) {
+ netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
+ phy_modes(intf->phy_interface), intf->port);
+ ret = -EINVAL;
+ goto err_free_netdev;
+ }
+
+ ret = of_get_ethdev_address(ndev_dn, ndev);
+ if (ret) {
+ netdev_warn(ndev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ SET_NETDEV_DEV(ndev, dev);
+ intf->ops = &bcmasp_intf_ops;
+ ndev->netdev_ops = &bcmasp_netdev_ops;
+ ndev->ethtool_ops = &bcmasp_ethtool_ops;
+ intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK);
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ ndev->hw_features |= ndev->features;
+ ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+
+ return intf;
+
+err_free_netdev:
+ free_netdev(ndev);
+err:
+ return NULL;
+}
+
+void bcmasp_interface_destroy(struct bcmasp_intf *intf)
+{
+ if (intf->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(intf->ndev);
+ if (of_phy_is_fixed_link(intf->ndev_dn))
+ of_phy_deregister_fixed_link(intf->ndev_dn);
+ free_netdev(intf->ndev);
+}
+
+static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
+{
+ struct net_device *ndev = intf->ndev;
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_MPD_CTRL);
+ if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+ reg |= UMC_MPD_CTRL_MPD_EN;
+ reg &= ~UMC_MPD_CTRL_PSW_EN;
+ if (intf->wolopts & WAKE_MAGICSECURE) {
+ /* Program the SecureOn password */
+ umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
+ UMC_PSW_MS);
+ umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
+ UMC_PSW_LS);
+ reg |= UMC_MPD_CTRL_PSW_EN;
+ }
+ umac_wl(intf, reg, UMC_MPD_CTRL);
+
+ if (intf->wolopts & WAKE_FILTER)
+ bcmasp_netfilt_suspend(intf);
+
+ /* UniMAC receive needs to be turned on */
+ umac_enable_set(intf, UMC_CMD_RX_EN, 1);
+
+ if (intf->parent->wol_irq > 0) {
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_CLEAR);
+ }
+
+ netif_dbg(intf, wol, ndev, "entered WOL mode\n");
+}
+
+int bcmasp_interface_suspend(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+ struct net_device *dev = intf->ndev;
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmasp_netif_deinit(dev);
+
+ if (!intf->wolopts) {
+ ret = phy_suspend(dev->phydev);
+ if (ret)
+ goto out;
+
+ if (intf->internal_phy)
+ bcmasp_ephy_enable_set(intf, false);
+ else
+ bcmasp_rgmii_mode_en_set(intf, false);
+
+ /* If Wake-on-LAN is disabled, we can safely
+ * disable the network interface clocks.
+ */
+ bcmasp_core_clock_set_intf(intf, false);
+ }
+
+ if (device_may_wakeup(kdev) && intf->wolopts)
+ bcmasp_suspend_to_wol(intf);
+
+ clk_disable_unprepare(intf->parent->clk);
+
+ return ret;
+
+out:
+ bcmasp_netif_init(dev, false);
+ return ret;
+}
+
+static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
+{
+ u32 reg;
+
+ reg = umac_rl(intf, UMC_MPD_CTRL);
+ reg &= ~UMC_MPD_CTRL_MPD_EN;
+ umac_wl(intf, reg, UMC_MPD_CTRL);
+
+ if (intf->parent->wol_irq > 0) {
+ wakeup_intr2_core_wl(intf->parent, 0xffffffff,
+ ASP_WAKEUP_INTR2_MASK_SET);
+ }
+}
+
+int bcmasp_interface_resume(struct bcmasp_intf *intf)
+{
+ struct net_device *dev = intf->ndev;
+ int ret;
+
+ if (!netif_running(dev))
+ return 0;
+
+ ret = clk_prepare_enable(intf->parent->clk);
+ if (ret)
+ return ret;
+
+ ret = bcmasp_netif_init(dev, false);
+ if (ret)
+ goto out;
+
+ bcmasp_resume_from_wol(intf);
+
+ if (intf->eee.eee_enabled)
+ bcmasp_eee_enable_set(intf, true);
+
+ netif_device_attach(dev);
+
+ return 0;
+
+out:
+ clk_disable_unprepare(intf->parent->clk);
+ return ret;
+}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
new file mode 100644
index 000000000000..ad742612895f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BCMASP_INTF_DEFS_H
+#define __BCMASP_INTF_DEFS_H
+
+#define UMC_OFFSET(intf) \
+ ((((intf)->port) * 0x800) + 0xc000)
+#define UMC_CMD 0x008
+#define UMC_CMD_TX_EN BIT(0)
+#define UMC_CMD_RX_EN BIT(1)
+#define UMC_CMD_SPEED_SHIFT 0x2
+#define UMC_CMD_SPEED_MASK 0x3
+#define UMC_CMD_SPEED_10 0x0
+#define UMC_CMD_SPEED_100 0x1
+#define UMC_CMD_SPEED_1000 0x2
+#define UMC_CMD_SPEED_2500 0x3
+#define UMC_CMD_PROMISC BIT(4)
+#define UMC_CMD_PAD_EN BIT(5)
+#define UMC_CMD_CRC_FWD BIT(6)
+#define UMC_CMD_PAUSE_FWD BIT(7)
+#define UMC_CMD_RX_PAUSE_IGNORE BIT(8)
+#define UMC_CMD_TX_ADDR_INS BIT(9)
+#define UMC_CMD_HD_EN BIT(10)
+#define UMC_CMD_SW_RESET BIT(13)
+#define UMC_CMD_LCL_LOOP_EN BIT(15)
+#define UMC_CMD_AUTO_CONFIG BIT(22)
+#define UMC_CMD_CNTL_FRM_EN BIT(23)
+#define UMC_CMD_NO_LEN_CHK BIT(24)
+#define UMC_CMD_RMT_LOOP_EN BIT(25)
+#define UMC_CMD_PRBL_EN BIT(27)
+#define UMC_CMD_TX_PAUSE_IGNORE BIT(28)
+#define UMC_CMD_TX_RX_EN BIT(29)
+#define UMC_CMD_RUNT_FILTER_DIS BIT(30)
+#define UMC_MAC0 0x0c
+#define UMC_MAC1 0x10
+#define UMC_FRM_LEN 0x14
+#define UMC_EEE_CTRL 0x64
+#define EN_LPI_RX_PAUSE BIT(0)
+#define EN_LPI_TX_PFC BIT(1)
+#define EN_LPI_TX_PAUSE BIT(2)
+#define EEE_EN BIT(3)
+#define RX_FIFO_CHECK BIT(4)
+#define EEE_TX_CLK_DIS BIT(5)
+#define DIS_EEE_10M BIT(6)
+#define LP_IDLE_PREDICTION_MODE BIT(7)
+#define UMC_EEE_LPI_TIMER 0x68
+#define UMC_PAUSE_CNTRL 0x330
+#define UMC_TX_FLUSH 0x334
+#define UMC_GR64 0x400
+#define UMC_GR127 0x404
+#define UMC_GR255 0x408
+#define UMC_GR511 0x40c
+#define UMC_GR1023 0x410
+#define UMC_GR1518 0x414
+#define UMC_GRMGV 0x418
+#define UMC_GR2047 0x41c
+#define UMC_GR4095 0x420
+#define UMC_GR9216 0x424
+#define UMC_GRPKT 0x428
+#define UMC_GRBYT 0x42c
+#define UMC_GRMCA 0x430
+#define UMC_GRBCA 0x434
+#define UMC_GRFCS 0x438
+#define UMC_GRXCF 0x43c
+#define UMC_GRXPF 0x440
+#define UMC_GRXUO 0x444
+#define UMC_GRALN 0x448
+#define UMC_GRFLR 0x44c
+#define UMC_GRCDE 0x450
+#define UMC_GRFCR 0x454
+#define UMC_GROVR 0x458
+#define UMC_GRJBR 0x45c
+#define UMC_GRMTUE 0x460
+#define UMC_GRPOK 0x464
+#define UMC_GRUC 0x468
+#define UMC_GRPPP 0x46c
+#define UMC_GRMCRC 0x470
+#define UMC_TR64 0x480
+#define UMC_TR127 0x484
+#define UMC_TR255 0x488
+#define UMC_TR511 0x48c
+#define UMC_TR1023 0x490
+#define UMC_TR1518 0x494
+#define UMC_TRMGV 0x498
+#define UMC_TR2047 0x49c
+#define UMC_TR4095 0x4a0
+#define UMC_TR9216 0x4a4
+#define UMC_GTPKT 0x4a8
+#define UMC_GTMCA 0x4ac
+#define UMC_GTBCA 0x4b0
+#define UMC_GTXPF 0x4b4
+#define UMC_GTXCF 0x4b8
+#define UMC_GTFCS 0x4bc
+#define UMC_GTOVR 0x4c0
+#define UMC_GTDRF 0x4c4
+#define UMC_GTEDF 0x4c8
+#define UMC_GTSCL 0x4cc
+#define UMC_GTMCL 0x4d0
+#define UMC_GTLCL 0x4d4
+#define UMC_GTXCL 0x4d8
+#define UMC_GTFRG 0x4dc
+#define UMC_GTNCL 0x4e0
+#define UMC_GTJBR 0x4e4
+#define UMC_GTBYT 0x4e8
+#define UMC_GTPOK 0x4ec
+#define UMC_GTUC 0x4f0
+#define UMC_RRPKT 0x500
+#define UMC_RRUND 0x504
+#define UMC_RRFRG 0x508
+#define UMC_RRBYT 0x50c
+#define UMC_MIB_CNTRL 0x580
+#define UMC_MIB_CNTRL_RX_CNT_RST BIT(0)
+#define UMC_MIB_CNTRL_RUNT_CNT_RST BIT(1)
+#define UMC_MIB_CNTRL_TX_CNT_RST BIT(2)
+#define UMC_RX_MAX_PKT_SZ 0x608
+#define UMC_MPD_CTRL 0x620
+#define UMC_MPD_CTRL_MPD_EN BIT(0)
+#define UMC_MPD_CTRL_PSW_EN BIT(27)
+#define UMC_PSW_MS 0x624
+#define UMC_PSW_LS 0x628
+
+#define UMAC2FB_OFFSET_2_1 0x9f044
+#define UMAC2FB_OFFSET 0x9f03c
+#define UMAC2FB_CFG 0x0
+#define UMAC2FB_CFG_OPUT_EN BIT(0)
+#define UMAC2FB_CFG_VLAN_EN BIT(1)
+#define UMAC2FB_CFG_SNAP_EN BIT(2)
+#define UMAC2FB_CFG_BCM_TG_EN BIT(3)
+#define UMAC2FB_CFG_IPUT_EN BIT(4)
+#define UMAC2FB_CFG_CHID_SHIFT 8
+#define UMAC2FB_CFG_OK_SEND_SHIFT 24
+#define UMAC2FB_CFG_DEFAULT_EN \
+ (UMAC2FB_CFG_OPUT_EN | UMAC2FB_CFG_VLAN_EN \
+ | UMAC2FB_CFG_SNAP_EN | UMAC2FB_CFG_IPUT_EN)
+
+#define RGMII_OFFSET(intf) \
+ ((((intf)->port) * 0x100) + 0xd000)
+#define RGMII_EPHY_CNTRL 0x00
+#define RGMII_EPHY_CFG_IDDQ_BIAS BIT(0)
+#define RGMII_EPHY_CFG_EXT_PWRDOWN BIT(1)
+#define RGMII_EPHY_CFG_FORCE_DLL_EN BIT(2)
+#define RGMII_EPHY_CFG_IDDQ_GLOBAL BIT(3)
+#define RGMII_EPHY_CK25_DIS BIT(4)
+#define RGMII_EPHY_RESET BIT(7)
+#define RGMII_OOB_CNTRL 0x0c
+#define RGMII_LINK BIT(4)
+#define RGMII_OOB_DIS BIT(5)
+#define RGMII_MODE_EN BIT(6)
+#define RGMII_ID_MODE_DIS BIT(16)
+
+#define RGMII_PORT_CNTRL 0x60
+#define RGMII_PORT_MODE_EPHY 0
+#define RGMII_PORT_MODE_GPHY 1
+#define RGMII_PORT_MODE_EXT_EPHY 2
+#define RGMII_PORT_MODE_EXT_GPHY 3
+#define RGMII_PORT_MODE_EXT_RVMII 4
+#define RGMII_PORT_MODE_MASK GENMASK(2, 0)
+
+#define RGMII_SYS_LED_CNTRL 0x74
+#define RGMII_SYS_LED_CNTRL_LINK_OVRD BIT(15)
+
+#define TX_SPB_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x30) + 0x48180)
+#define TX_SPB_DMA_READ 0x00
+#define TX_SPB_DMA_BASE 0x08
+#define TX_SPB_DMA_END 0x10
+#define TX_SPB_DMA_VALID 0x18
+#define TX_SPB_DMA_FIFO_CTRL 0x20
+#define TX_SPB_DMA_FIFO_FLUSH BIT(0)
+#define TX_SPB_DMA_FIFO_STATUS 0x24
+
+#define TX_SPB_CTRL_OFFSET(intf) \
+ ((((intf)->channel) * 0x68) + 0x49340)
+#define TX_SPB_CTRL_ENABLE 0x0
+#define TX_SPB_CTRL_ENABLE_EN BIT(0)
+#define TX_SPB_CTRL_XF_CTRL2 0x20
+#define TX_SPB_CTRL_XF_BID_SHIFT 16
+
+#define TX_SPB_TOP_OFFSET(intf) \
+ ((((intf)->channel) * 0x1c) + 0x4a0e0)
+#define TX_SPB_TOP_BLKOUT 0x0
+#define TX_SPB_TOP_SPRE_BW_CTRL 0x4
+
+#define TX_EPKT_C_OFFSET(intf) \
+ ((((intf)->channel) * 0x120) + 0x40900)
+#define TX_EPKT_C_CFG_MISC 0x0
+#define TX_EPKT_C_CFG_MISC_EN BIT(0)
+#define TX_EPKT_C_CFG_MISC_PT BIT(1)
+#define TX_EPKT_C_CFG_MISC_PS_SHIFT 14
+#define TX_EPKT_C_CFG_MISC_FD_SHIFT 20
+
+#define TX_PAUSE_CTRL_OFFSET(intf) \
+ ((((intf)->channel * 0xc) + 0x49a20))
+#define TX_PAUSE_MAP_VECTOR 0x8
+
+#define RX_EDPKT_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x38) + 0x9ca00)
+#define RX_EDPKT_DMA_WRITE 0x00
+#define RX_EDPKT_DMA_READ 0x08
+#define RX_EDPKT_DMA_BASE 0x10
+#define RX_EDPKT_DMA_END 0x18
+#define RX_EDPKT_DMA_VALID 0x20
+#define RX_EDPKT_DMA_FULLNESS 0x28
+#define RX_EDPKT_DMA_MIN_THRES 0x2c
+#define RX_EDPKT_DMA_CH_XONOFF 0x30
+
+#define RX_EDPKT_CFG_OFFSET(intf) \
+ ((((intf)->channel) * 0x70) + 0x9c600)
+#define RX_EDPKT_CFG_CFG0 0x0
+#define RX_EDPKT_CFG_CFG0_DBUF_SHIFT 9
+#define RX_EDPKT_CFG_CFG0_RBUF 0x0
+#define RX_EDPKT_CFG_CFG0_RBUF_4K 0x1
+#define RX_EDPKT_CFG_CFG0_BUF_4K 0x2
+/* EFRM stuffing: 0 = no byte stuffing, 1 = two-byte stuffing */
+#define RX_EDPKT_CFG_CFG0_EFRM_STUF BIT(11)
+#define RX_EDPKT_CFG_CFG0_BALN_SHIFT 12
+#define RX_EDPKT_CFG_CFG0_NO_ALN 0
+#define RX_EDPKT_CFG_CFG0_4_ALN 2
+#define RX_EDPKT_CFG_CFG0_64_ALN 6
+#define RX_EDPKT_RING_BUFFER_WRITE 0x38
+#define RX_EDPKT_RING_BUFFER_READ 0x40
+#define RX_EDPKT_RING_BUFFER_BASE 0x48
+#define RX_EDPKT_RING_BUFFER_END 0x50
+#define RX_EDPKT_RING_BUFFER_VALID 0x58
+#define RX_EDPKT_CFG_ENABLE 0x6c
+#define RX_EDPKT_CFG_ENABLE_EN BIT(0)
+
+#define RX_SPB_DMA_OFFSET(intf) \
+ ((((intf)->channel) * 0x30) + 0xa0000)
+#define RX_SPB_DMA_READ 0x00
+#define RX_SPB_DMA_BASE 0x08
+#define RX_SPB_DMA_END 0x10
+#define RX_SPB_DMA_VALID 0x18
+#define RX_SPB_DMA_FIFO_CTRL 0x20
+#define RX_SPB_DMA_FIFO_FLUSH BIT(0)
+#define RX_SPB_DMA_FIFO_STATUS 0x24
+
+#define RX_SPB_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x68) + 0xa1000)
+#define RX_SPB_CTRL_ENABLE 0x00
+#define RX_SPB_CTRL_ENABLE_EN BIT(0)
+
+#define RX_PAUSE_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x4) + 0xa1138)
+#define RX_PAUSE_MAP_VECTOR 0x00
+
+#define RX_SPB_TOP_CTRL_OFFSET(intf) \
+ ((((intf)->channel - 6) * 0x14) + 0xa2000)
+#define RX_SPB_TOP_BLKOUT 0x00
+
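+/* The RX data ring is 32 4K buffers (128 KiB with 4 KiB pages); each
+ * descriptor ring holds 64 descriptors per buffer, i.e. 2048 16-byte
+ * descriptors (32 KiB).
+ */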
+#define NUM_4K_BUFFERS 32
+#define RING_BUFFER_SIZE (PAGE_SIZE * NUM_4K_BUFFERS)
+
+#define DESC_RING_COUNT (64 * NUM_4K_BUFFERS)
+#define DESC_SIZE 16
+#define DESC_RING_SIZE (DESC_RING_COUNT * DESC_SIZE)
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 2cf96892e565..a741070f1f9a 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1940,7 +1940,6 @@ static struct platform_driver bcm63xx_enet_driver = {
.remove = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
- .owner = THIS_MODULE,
},
};
@@ -2761,7 +2760,6 @@ static struct platform_driver bcm63xx_enetsw_driver = {
.remove = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
- .owner = THIS_MODULE,
},
};
@@ -2791,7 +2789,6 @@ struct platform_driver bcm63xx_enet_shared_driver = {
.probe = bcm_enet_shared_probe,
.driver = {
.name = "bcm63xx_enet_shared",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 52ee3751187a..448a1b90de5e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1450,7 +1450,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
- return -ENODEV;
+ return PTR_ERR(phy_dev);
}
err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 542c69822649..8e04552d2216 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -890,7 +890,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
@@ -1075,7 +1075,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
/* Reset the ramrod data buffer for the first rule */
@@ -1125,7 +1125,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
- bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ bool add = cmd == BNX2X_VLAN_MAC_ADD;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
u16 inner_mac;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1eb490c48c52..5cc0dbe12132 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,7 +54,7 @@
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>
@@ -293,6 +293,60 @@ static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
BNXT_DB_CQ(db, idx);
}
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+ return;
+
+ if (BNXT_PF(bp))
+ queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+ else
+ schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
+static void __bnxt_queue_sp_work(struct bnxt *bp)
+{
+ if (BNXT_PF(bp))
+ queue_work(bnxt_pf_wq, &bp->sp_task);
+ else
+ schedule_work(&bp->sp_task);
+}
+
+static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
+{
+ set_bit(event, &bp->sp_event);
+ __bnxt_queue_sp_work(bp);
+}
+
+static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ if (!rxr->bnapi->in_reset) {
+ rxr->bnapi->in_reset = true;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ else
+ set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
+ __bnxt_queue_sp_work(bp);
+ }
+ rxr->rx_next_cons = 0xffff;
+}
+
+void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int idx)
+{
+ struct bnxt_napi *bnapi = txr->bnapi;
+
+ if (bnapi->tx_fault)
+ return;
+
+ netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
+ txr->txq_index, bnapi->tx_pkts,
+ txr->tx_cons, txr->tx_prod, idx);
+ WARN_ON_ONCE(1);
+ bnapi->tx_fault = 1;
+ bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
+}
+
const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
@@ -653,6 +707,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ if (unlikely(!skb)) {
+ bnxt_sched_reset_txr(bp, txr, i);
+ return;
+ }
+
tx_bytes += skb->len;
if (tx_buf->is_push) {
@@ -686,7 +745,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
next_tx_int:
cons = NEXT_TX(cons);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
bnapi->tx_pkts = 0;
@@ -702,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
unsigned int *offset,
gfp_t gfp)
{
- struct device *dev = &bp->pdev->dev;
struct page *page;
if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -715,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
if (!page)
return NULL;
- *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(dev, *mapping)) {
- page_pool_recycle_direct(rxr->page_pool, page);
- return NULL;
- }
+ *mapping = page_pool_get_dma_addr(page) + *offset;
return page;
}
@@ -818,48 +871,15 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct pci_dev *pdev = bp->pdev;
struct page *page;
dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
- if (BNXT_RX_PAGE_MODE(bp)) {
- page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
- if (!page)
- return -ENOMEM;
-
- } else {
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- page = rxr->rx_page;
- if (!page) {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- rxr->rx_page = page;
- rxr->rx_page_offset = 0;
- }
- offset = rxr->rx_page_offset;
- rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
- if (rxr->rx_page_offset == PAGE_SIZE)
- rxr->rx_page = NULL;
- else
- get_page(page);
- } else {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- }
+ page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
- mapping = dma_map_page_attrs(&pdev->dev, page, offset,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- __free_page(page);
- return -EIO;
- }
- }
+ if (!page)
+ return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
@@ -972,9 +992,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
- skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ bp->rx_dir);
+ skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
if (!skb) {
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
@@ -1006,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+ bp->rx_dir);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1049,7 +1069,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return NULL;
}
- skb = build_skb(data, bp->rx_buf_size);
+ skb = napi_build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
@@ -1123,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return 0;
}
- dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+ bp->rx_dir);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1145,6 +1164,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
agg_bufs, tpa, NULL);
if (!total_frag_len) {
+ skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
@@ -1244,38 +1264,6 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return 0;
}
-static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
-{
- if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
- return;
-
- if (BNXT_PF(bp))
- queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
- else
- schedule_delayed_work(&bp->fw_reset_task, delay);
-}
-
-static void bnxt_queue_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp))
- queue_work(bnxt_pf_wq, &bp->sp_task);
- else
- schedule_work(&bp->sp_task);
-}
-
-static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
- if (!rxr->bnapi->in_reset) {
- rxr->bnapi->in_reset = true;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- else
- set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
- rxr->rx_next_cons = 0xffff;
-}
-
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
@@ -1330,7 +1318,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
cons, rxr->rx_next_cons,
TPA_START_ERROR_CODE(tpa_start1));
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
return;
}
/* Store cfa_code in tpa_info to use in tpa_end
@@ -1689,7 +1677,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
- skb = build_skb(data, bp->rx_buf_size);
+ skb = napi_build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
@@ -1767,6 +1755,7 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
return;
}
skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_for_recycle(skb);
napi_gro_receive(&bnapi->napi, skb);
}
@@ -1854,7 +1843,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (rxr->rx_next_cons != 0xffff)
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
if (rc1)
return rc1;
goto next_rx_no_prod_no_len;
@@ -1892,7 +1881,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err);
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
}
}
goto next_rx_no_len;
@@ -2339,7 +2328,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
}
rxr = bp->bnapi[grp_idx]->rx_ring;
- bnxt_sched_reset(bp, rxr);
+ bnxt_sched_reset_rxr(bp, rxr);
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
@@ -2394,7 +2383,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
default:
goto async_event_process_exit;
}
- bnxt_queue_sp_work(bp);
+ __bnxt_queue_sp_work(bp);
async_event_process_exit:
return 0;
}
@@ -2423,8 +2412,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
}
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
- set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
break;
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -2582,7 +2570,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
int budget)
{
- if (bnapi->tx_pkts)
+ if (bnapi->tx_pkts && !bnapi->tx_fault)
bnapi->tx_int(bp, bnapi, budget);
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
@@ -2952,10 +2940,6 @@ skip_rx_tpa_free:
rx_buf->data = NULL;
if (BNXT_RX_PAGE_MODE(bp)) {
- mapping -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- BNXT_RX_PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
page_pool_recycle_direct(rxr->page_pool, data);
} else {
dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -2976,30 +2960,13 @@ skip_rx_buf_free:
if (!page)
continue;
- if (BNXT_RX_PAGE_MODE(bp)) {
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- rx_agg_buf->page = NULL;
- __clear_bit(i, rxr->rx_agg_bmap);
-
- page_pool_recycle_direct(rxr->page_pool, page);
- } else {
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
- rx_agg_buf->page = NULL;
- __clear_bit(i, rxr->rx_agg_bmap);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
- }
+ page_pool_recycle_direct(rxr->page_pool, page);
}
skip_rx_agg_free:
- if (rxr->rx_page) {
- __free_page(rxr->rx_page);
- rxr->rx_page = NULL;
- }
map = rxr->rx_tpa_idx_map;
if (map)
memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
@@ -3218,11 +3185,15 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
{
struct page_pool_params pp = { 0 };
- pp.pool_size = bp->rx_ring_size;
+ pp.pool_size = bp->rx_agg_ring_size;
+ if (BNXT_RX_PAGE_MODE(bp))
+ pp.pool_size += bp->rx_ring_size;
pp.nid = dev_to_node(&bp->pdev->dev);
pp.napi = &rxr->bnapi->napi;
pp.dev = &bp->pdev->dev;
- pp.dma_dir = DMA_BIDIRECTIONAL;
+ pp.dma_dir = bp->rx_dir;
+ pp.max_len = PAGE_SIZE;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
pp.flags |= PP_FLAG_PAGE_FRAG;
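The hunk above is where the RX page-pool conversion lands: once the pool is created with PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV and dma_dir set to the ring's direction, the dma_map_page_attrs()/dma_unmap_page_attrs() calls removed earlier in this file are no longer needed. A rough editorial sketch of that minimal setup, not bnxt code (the demo_* names are invented):

/* Editorial sketch only: a driver-agnostic page_pool setup in the style the
 * hunk above converges on.  The pool owns the DMA mapping, so receive paths
 * just read the address back and sync for the CPU.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>

struct demo_rx_ring {
	struct page_pool *pool;
};

static int demo_rx_pool_create(struct demo_rx_ring *ring, struct device *dev,
			       struct napi_struct *napi, unsigned int size)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= size,
		.nid		= dev_to_node(dev),
		.dev		= dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
	};

	ring->pool = page_pool_create(&pp);
	return PTR_ERR_OR_ZERO(ring->pool);
}

static struct page *demo_rx_alloc(struct demo_rx_ring *ring, dma_addr_t *mapping)
{
	struct page *page = page_pool_dev_alloc_pages(ring->pool);

	if (!page)
		return NULL;
	/* The pool mapped the page at allocation time; no dma_map here. */
	*mapping = page_pool_get_dma_addr(page);
	return page;
}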
@@ -9422,10 +9393,16 @@ static void bnxt_disable_napi(struct bnxt *bp)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
- napi_disable(&bp->bnapi[i]->napi);
- if (bp->bnapi[i]->rx_ring)
+ cpr = &bnapi->cp_ring;
+ if (bnapi->tx_fault)
+ cpr->sw_stats.tx.tx_resets++;
+ if (bnapi->in_reset)
+ cpr->sw_stats.rx.rx_resets++;
+ napi_disable(&bnapi->napi);
+ if (bnapi->rx_ring)
cancel_work_sync(&cpr->dim.work);
}
}
@@ -9439,9 +9416,9 @@ static void bnxt_enable_napi(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
+ bnapi->tx_fault = 0;
+
cpr = &bnapi->cp_ring;
- if (bnapi->in_reset)
- cpr->sw_stats.rx.rx_resets++;
bnapi->in_reset = false;
bnapi->tx_pkts = 0;
@@ -10710,8 +10687,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
- if (bp->bnapi && irq_re_init)
+ if (bp->bnapi && irq_re_init) {
bnxt_get_ring_stats(bp, &bp->net_stats_prev);
+ bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
+ }
if (irq_re_init) {
bnxt_free_irq(bp);
bnxt_del_napi(bp);
@@ -10960,6 +10939,35 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
}
+static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
+ struct bnxt_total_ring_err_stats *stats,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+ u64 *hw_stats = cpr->stats.sw_stats;
+
+ stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
+ stats->rx_total_resets += sw_stats->rx.rx_resets;
+ stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
+ stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
+ stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
+ stats->rx_total_ring_discards +=
+ BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
+ stats->tx_total_resets += sw_stats->tx.tx_resets;
+ stats->tx_total_ring_discards +=
+ BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
+ stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
+}
+
+void bnxt_get_ring_err_stats(struct bnxt *bp,
+ struct bnxt_total_ring_err_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
+}
+
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
struct net_device *dev = bp->dev;
@@ -11048,8 +11056,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (mask != vnic->rx_mask || uc_update || mc_update) {
vnic->rx_mask = mask;
- set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
}
}
@@ -11614,8 +11621,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
struct bnxt *bp = netdev_priv(dev);
netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
}
static void bnxt_fw_health_check(struct bnxt *bp)
@@ -11652,8 +11658,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
return;
fw_reset:
- set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
}
static void bnxt_timer(struct timer_list *t)
@@ -11670,21 +11675,15 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
- set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
+ bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
- if (bnxt_tc_flower_enabled(bp)) {
- set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (bnxt_tc_flower_enabled(bp))
+ bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
- set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
#endif /*CONFIG_RFS_ACCEL*/
if (bp->link_info.phy_retry) {
@@ -11692,21 +11691,17 @@ static void bnxt_timer(struct timer_list *t)
bp->link_info.phy_retry = false;
netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
} else {
- set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
}
}
- if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
- set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
- netif_carrier_ok(dev)) {
- set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
- }
+ netif_carrier_ok(dev))
+ bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
+
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -12985,8 +12980,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
- set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- bnxt_queue_sp_work(bp);
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
return new_fltr->sw_id;
@@ -13118,9 +13112,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
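Taken together, the bnxt.c hunks above fold every open-coded set_bit()-then-queue_work() pair into bnxt_queue_sp_work(bp, event). As a rough illustration of that pattern outside the driver (editorial sketch only; demo_* names are invented and the work item is assumed to have been set up with INIT_WORK()):

/* Editorial sketch only: record *why* the worker is needed in an event
 * bitmap, then kick a single work item; the handler drains the bits.
 */
#include <linux/bitops.h>
#include <linux/container_of.h>
#include <linux/workqueue.h>

enum { DEMO_RESET_EVENT, DEMO_STATS_EVENT };

struct demo_dev {
	unsigned long sp_event;
	struct work_struct sp_task;	/* INIT_WORK()'d with demo_sp_task */
};

static void demo_queue_sp_work(struct demo_dev *dd, unsigned int event)
{
	set_bit(event, &dd->sp_event);
	schedule_work(&dd->sp_task);
}

static void demo_sp_task(struct work_struct *work)
{
	struct demo_dev *dd = container_of(work, struct demo_dev, sp_task);

	if (test_and_clear_bit(DEMO_RESET_EVENT, &dd->sp_event)) {
		/* reset handling would run here */
	}
	if (test_and_clear_bit(DEMO_STATS_EVENT, &dd->sp_event)) {
		/* stats refresh would run here */
	}
}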
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bb95c3dc5270..84cbcfa61bc1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -919,9 +919,6 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
- struct page *rx_page;
- unsigned int rx_page_offset;
-
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
@@ -942,15 +939,32 @@ struct bnxt_rx_sw_stats {
u64 rx_netpoll_discards;
};
+struct bnxt_tx_sw_stats {
+ u64 tx_resets;
+};
+
struct bnxt_cmn_sw_stats {
u64 missed_irqs;
};
struct bnxt_sw_stats {
struct bnxt_rx_sw_stats rx;
+ struct bnxt_tx_sw_stats tx;
struct bnxt_cmn_sw_stats cmn;
};
+struct bnxt_total_ring_err_stats {
+ u64 rx_total_l4_csum_errors;
+ u64 rx_total_resets;
+ u64 rx_total_buf_errors;
+ u64 rx_total_oom_discards;
+ u64 rx_total_netpoll_discards;
+ u64 rx_total_ring_discards;
+ u64 tx_total_resets;
+ u64 tx_total_ring_discards;
+ u64 total_missed_irqs;
+};
+
struct bnxt_stats_mem {
u64 *sw_stats;
u64 *hw_masks;
@@ -1008,6 +1022,7 @@ struct bnxt_napi {
int budget);
int tx_pkts;
u8 events;
+ u8 tx_fault:1;
u32 flags;
#define BNXT_NAPI_FLAG_XDP 0x1
@@ -2020,6 +2035,8 @@ struct bnxt {
u8 pri2cos_idx[8];
u8 pri2cos_valid;
+ struct bnxt_total_ring_err_stats ring_err_stats_prev;
+
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
unsigned int hwrm_cmd_timeout;
@@ -2329,6 +2346,8 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int idx);
void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
@@ -2344,6 +2363,8 @@ int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
void bnxt_reenable_sriov(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_ring_err_stats(struct bnxt *bp,
+ struct bnxt_total_ring_err_stats *stats);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
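Every member of the bnxt_total_ring_err_stats struct added above is a u64, which is what lets the ethtool code later in this patch sum the live and saved copies with a plain pointer walk. A small editorial sketch of that idea using a made-up two-field struct:

/* Editorial sketch only, not the driver's struct or helper. */
#include <linux/build_bug.h>
#include <linux/types.h>

struct demo_totals {
	u64 rx_total_resets;
	u64 tx_total_resets;
};

static void demo_sum_totals(u64 *out, const struct demo_totals *curr,
			    const struct demo_totals *prev)
{
	const u64 *c = &curr->rx_total_resets;	/* first member */
	const u64 *p = &prev->rx_total_resets;
	unsigned int i;

	/* The walk only works while every member stays a u64. */
	BUILD_BUG_ON(sizeof(*curr) % sizeof(u64));
	for (i = 0; i < sizeof(*curr) / sizeof(u64); i++)
		out[i] = c[i] + p[i];
}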
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index caab3d626a2a..63e067038385 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -98,7 +98,6 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
{
struct hwrm_queue_cos2bw_cfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
- void *data;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
@@ -129,11 +128,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
- data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
- memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (qidx == 0) {
req->queue_id0 = cos2bw.queue_id;
- req->unused_0 = 0;
+ req->queue_id0_min_bw = cos2bw.min_bw;
+ req->queue_id0_max_bw = cos2bw.max_bw;
+ req->queue_id0_tsa_assign = cos2bw.tsa;
+ req->queue_id0_pri_lvl = cos2bw.pri_lvl;
+ req->queue_id0_bw_weight = cos2bw.bw_weight;
+ } else {
+ memcpy(&req->cfg[i - 1], &cos2bw.cfg, sizeof(cos2bw.cfg));
}
}
return hwrm_req_send(bp, req);
@@ -144,7 +147,6 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
struct hwrm_queue_cos2bw_qcfg_output *resp;
struct hwrm_queue_cos2bw_qcfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
- void *data;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
@@ -158,13 +160,19 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
return rc;
}
- data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
- for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) {
+ for (i = 0; i < bp->max_tc; i++) {
int tc;
- memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
- if (i == 0)
+ if (i == 0) {
cos2bw.queue_id = resp->queue_id0;
+ cos2bw.min_bw = resp->queue_id0_min_bw;
+ cos2bw.max_bw = resp->queue_id0_max_bw;
+ cos2bw.tsa = resp->queue_id0_tsa_assign;
+ cos2bw.pri_lvl = resp->queue_id0_pri_lvl;
+ cos2bw.bw_weight = resp->queue_id0_bw_weight;
+ } else {
+ memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg));
+ }
tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
if (tc < 0)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 716742522161..5b2a6f678244 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -27,11 +27,12 @@ struct bnxt_cos2bw_cfg {
u8 queue_id;
__le32 min_bw;
__le32 max_bw;
-#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
u8 tsa;
u8 pri_lvl;
u8 bw_weight;
);
+/* for min_bw / max_bw */
+#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
u8 unused;
};
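The bnxt_dcb.c hunk above now copies all of the per-queue fields in one memcpy(&req->cfg[i - 1], &cos2bw.cfg, sizeof(cos2bw.cfg)); the lone ");" visible in the bnxt_dcb.h hunk is the tail of the struct_group_attr() that makes such a group copy possible. An editorial sketch of the macro with a made-up struct (not the real bnxt_cos2bw_cfg):

/* Editorial sketch only: struct_group_attr() (from <linux/stddef.h>) names a
 * run of members so they can be copied as one object while each member stays
 * individually addressable.
 */
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_cos2bw {
	u8 pad[3];
	struct_group_attr(cfg, __packed,
		u8	queue_id;
		__le32	min_bw;
		__le32	max_bw;
		u8	tsa;
	);
	u8 unused;
};

static void demo_copy_cfg(struct demo_cos2bw *dst, const struct demo_cos2bw *src)
{
	/* Copy the whole group as a unit, as the dcb.c hunk does with cos2bw.cfg. */
	memcpy(&dst->cfg, &src->cfg, sizeof(dst->cfg));
}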
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8fd5071d8b09..547247d98eba 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -339,13 +339,16 @@ enum {
RX_NETPOLL_DISCARDS,
};
-static struct {
- u64 counter;
- char string[ETH_GSTRING_LEN];
-} bnxt_sw_func_stats[] = {
- {0, "rx_total_discard_pkts"},
- {0, "tx_total_discard_pkts"},
- {0, "rx_total_netpoll_discards"},
+static const char *const bnxt_ring_err_stats_arr[] = {
+ "rx_total_l4_csum_errors",
+ "rx_total_resets",
+ "rx_total_buf_errors",
+ "rx_total_oom_discards",
+ "rx_total_netpoll_discards",
+ "rx_total_ring_discards",
+ "tx_total_resets",
+ "tx_total_ring_discards",
+ "total_missed_irqs",
};
#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
@@ -495,7 +498,7 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
-#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
+#define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
@@ -532,7 +535,7 @@ static int bnxt_get_num_stats(struct bnxt *bp)
{
int num_stats = bnxt_get_num_ring_stats(bp);
- num_stats += BNXT_NUM_SW_FUNC_STATS;
+ num_stats += BNXT_NUM_RING_ERR_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
@@ -583,18 +586,17 @@ static bool is_tx_ring(struct bnxt *bp, int ring_num)
static void bnxt_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
{
- u32 i, j = 0;
+ struct bnxt_total_ring_err_stats ring_err_stats = {0};
struct bnxt *bp = netdev_priv(dev);
+ u64 *curr, *prev;
u32 tpa_stats;
+ u32 i, j = 0;
if (!bp->bnapi) {
- j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
+ j += bnxt_get_num_ring_stats(bp);
goto skip_ring_stats;
}
- for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
- bnxt_sw_func_stats[i].counter = 0;
-
tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -631,19 +633,16 @@ skip_tpa_ring_stats:
sw = (u64 *)&cpr->sw_stats.cmn;
for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
buf[j] = sw[k];
-
- bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
- BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
- bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
- BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
- bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter +=
- cpr->sw_stats.rx.rx_netpoll_discards;
}
- for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
- buf[j] = bnxt_sw_func_stats[i].counter;
+ bnxt_get_ring_err_stats(bp, &ring_err_stats);
skip_ring_stats:
+ curr = &ring_err_stats.rx_total_l4_csum_errors;
+ prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
+ for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
+ buf[j] = *curr + *prev;
+
if (bp->flags & BNXT_FLAG_PORT_STATS) {
u64 *port_stats = bp->port_stats.sw_stats;
@@ -745,8 +744,8 @@ skip_tpa_stats:
buf += ETH_GSTRING_LEN;
}
}
- for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
- strcpy(buf, bnxt_sw_func_stats[i].string);
+ for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
+ strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
buf += ETH_GSTRING_LEN;
}
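The string-table hunk above swaps the counter-carrying array of structs for a plain array of names and replaces strcpy() with strscpy(). A minimal editorial sketch of that fill loop with invented names:

/* Editorial sketch only: copy each name into its fixed-size ethtool slot,
 * truncating rather than overflowing if a name ever outgrows the slot.
 */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/string.h>

static const char *const demo_stats_names[] = {
	"rx_total_resets",
	"tx_total_resets",
};

static void demo_fill_strings(char *buf)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_stats_names); i++) {
		strscpy(buf, demo_stats_names[i], ETH_GSTRING_LEN);
		buf += ETH_GSTRING_LEN;
	}
}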
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index b31de4cf6534..f178ed9899a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -5739,286 +5739,48 @@ struct hwrm_queue_cos2bw_qcfg_output {
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
u8 unused_2[4];
u8 valid;
};
@@ -6082,286 +5844,48 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
- u8 queue_id1;
- __le32 queue_id1_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id1_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id1_pri_lvl;
- u8 queue_id1_bw_weight;
- u8 queue_id2;
- __le32 queue_id2_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id2_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id2_pri_lvl;
- u8 queue_id2_bw_weight;
- u8 queue_id3;
- __le32 queue_id3_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id3_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id3_pri_lvl;
- u8 queue_id3_bw_weight;
- u8 queue_id4;
- __le32 queue_id4_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id4_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id4_pri_lvl;
- u8 queue_id4_bw_weight;
- u8 queue_id5;
- __le32 queue_id5_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id5_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id5_pri_lvl;
- u8 queue_id5_bw_weight;
- u8 queue_id6;
- __le32 queue_id6_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id6_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id6_pri_lvl;
- u8 queue_id6_bw_weight;
- u8 queue_id7;
- __le32 queue_id7_min_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
- __le32 queue_id7_max_bw;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
- u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
- u8 queue_id7_pri_lvl;
- u8 queue_id7_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
u8 unused_1[5];
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d8afcf8d6b30..38d89d80b4a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -373,9 +373,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
- if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
- (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
- netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
+ if ((dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
+ (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
+ netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
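
The bnxt_tc hunk above tracks the flow dissector's used_keys field widening from 32 to 64 bits: key tests must use BIT_ULL() so the shift stays defined for key indices of 32 and above, and the value is printed with %llx. A minimal standalone sketch of the difference, assuming the usual kernel definitions of BIT()/BIT_ULL():

	#include <stdio.h>

	#define BIT(nr)		(1UL << (nr))	/* only 32 bits wide on 32-bit builds */
	#define BIT_ULL(nr)	(1ULL << (nr))	/* always 64 bits wide */

	int main(void)
	{
		unsigned long long used_keys = 0;
		int key = 40;			/* hypothetical dissector key index >= 32 */

		used_keys |= BIT_ULL(key);	/* well defined for 0..63 */
		/* BIT(key) would shift past the width of unsigned long on a
		 * 32-bit target, which is undefined behaviour. */
		printf("used_keys = 0x%llx\n", used_keys);
		return 0;
	}

The same BIT() to BIT_ULL() conversion recurs in the cxgb4, dpaa2-switch and enetc hunks further down.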
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index fb43232310b2..96f5ca778c67 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,7 +15,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
@@ -153,6 +153,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
tx_buf->action = 0;
tx_buf->xdpf = NULL;
} else if (tx_buf->action == XDP_TX) {
+ tx_buf->action = 0;
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
@@ -162,6 +163,9 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
tx_buf = &txr->tx_buf_ring[tx_cons];
page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
}
+ } else {
+ bnxt_sched_reset_txr(bp, txr, i);
+ return;
}
tx_cons = NEXT_TX(tx_cons);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2b5761ad2f92..24bade875ca6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2077,12 +2077,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&ring->lock);
if (ring->free_bds <= (nr_frags + 1)) {
- if (!netif_tx_queue_stopped(txq)) {
+ if (!netif_tx_queue_stopped(txq))
netif_tx_stop_queue(txq);
- netdev_err(dev,
- "%s: tx ring %d full when queue %d awake\n",
- __func__, index, ring->queue);
- }
ret = NETDEV_TX_BUSY;
goto out;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index cc3afb605b1e..97ea76d443ab 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -619,7 +619,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
- return -ENODEV;
+ return PTR_ERR(phydev);
}
/* Make sure we initialize MoCA PHYs with a link down */
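
Returning PTR_ERR(phydev) instead of a hard-coded -ENODEV lets the real reason for the failure (for example -EPROBE_DEFER or -ENOMEM) reach the probe path. A standalone sketch of the ERR_PTR convention this relies on, loosely mirroring include/linux/err.h:

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define EPROBE_DEFER	517

	static inline void *ERR_PTR(long error)    { return (void *)error; }
	static inline long PTR_ERR(const void *p)  { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *phydev = ERR_PTR(-EPROBE_DEFER);	/* a failure encoded as a pointer */

		if (IS_ERR(phydev))
			printf("propagate %ld instead of -ENODEV\n", PTR_ERR(phydev));
		return 0;
	}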
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5408ed051772..14b311196b8f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1539,8 +1539,7 @@ static int tg3_mdio_init(struct tg3 *tp)
return -ENOMEM;
tp->mdio_bus->name = "tg3 mdio bus";
- snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
- (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+ snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
tp->mdio_bus->priv = tp;
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
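
pci_dev_id() is the <linux/pci.h> helper for exactly this packing: PCI_DEVID(bus->number, devfn), i.e. the bus number in the high byte and devfn in the low byte. A small standalone sketch of the equivalence, using a hypothetical device 0000:03:00.1:

	#include <stdio.h>

	/* Mirrors PCI_DEVID(): bus in bits 15..8, devfn (slot << 3 | fn) in bits 7..0 */
	static unsigned short pci_devid(unsigned char bus, unsigned char devfn)
	{
		return (bus << 8) | devfn;
	}

	int main(void)
	{
		unsigned char devfn = (0x00 << 3) | 0x1;	/* slot 0, function 1 */

		printf("%x\n", pci_devid(0x03, devfn));		/* prints 301 */
		return 0;
	}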
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index d6d90f9722a7..31191b520b58 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1037,8 +1037,7 @@ bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info =
- (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
u32 txq_id;
int i;
@@ -1056,7 +1055,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
u32 txq_id;
int i;
@@ -1133,7 +1132,7 @@ bnad_tx_cleanup(struct delayed_work *work)
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
- struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
+ struct bnad_tx_info *tx_info = tx->priv;
struct bna_tcb *tcb;
int i;
@@ -1149,7 +1148,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1208,7 +1207,7 @@ bnad_rx_cleanup(void *work)
static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1231,7 +1230,7 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
- struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bnad_rx_info *rx_info = rx->priv;
struct bna_ccb *ccb;
struct bna_rcb *rcb;
struct bnad_rx_ctrl *rx_ctrl;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 82929ee76739..31f664ee4d77 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -26,7 +26,6 @@
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d3541159487d..72ac4a34424b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -313,15 +313,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
u16 ethtype_key = 0;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IP))) {
- netdev_warn(dev, "Unsupported key used: 0x%x\n",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
+ netdev_warn(dev, "Unsupported key used: 0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 68562a82d036..62f62bff74a5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -22,6 +22,7 @@
#include <crypto/internal/hash.h>
#include <linux/tls.h>
#include <net/tls.h>
+#include <net/tls_prot.h>
#include <net/tls_toe.h>
#include "t4fw_api.h"
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 276c32c3926a..d323c5c23521 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -54,7 +54,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -1855,9 +1854,8 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
return -ENOMEM;
dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq <= 0) {
- dev_warn(&dev->dev, "interrupt resource missing\n");
- err = -ENXIO;
+ if (dev->irq < 0) {
+ err = dev->irq;
goto free;
}
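
This hunk and the gemini one below adopt the current platform_get_irq() contract: the helper never returns 0, logs its own error (except for probe deferral), and hands back a negative errno that the caller should simply propagate. A minimal kernel-style sketch of the pattern, not tied to either driver:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* e.g. -EPROBE_DEFER or -ENXIO; already logged by the core */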
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5715b9ab2712..a8b9d1a3e4d5 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2415,8 +2415,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
/* Interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return irq ? irq : -ENODEV;
+ if (irq < 0)
+ return irq;
port->irq = irq;
/* Clock the port */
@@ -2538,7 +2538,7 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
static struct platform_driver gemini_ethernet_port_driver = {
.driver = {
.name = "gemini-ethernet-port",
- .of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+ .of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
.remove = gemini_ethernet_port_remove,
@@ -2604,7 +2604,7 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
static struct platform_driver gemini_ethernet_driver = {
.driver = {
.name = DRV_NAME,
- .of_match_table = of_match_ptr(gemini_ethernet_of_match),
+ .of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
.remove = gemini_ethernet_remove,
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
index 70728b2e5f18..bcfe52c11804 100644
--- a/drivers/net/ethernet/davicom/dm9051.c
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -510,10 +510,7 @@ static int dm9051_map_init(struct spi_device *spi, struct board_info *db)
regconfigdmbulk.lock_arg = db;
db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, &regconfigdmbulk);
- if (IS_ERR(db->regmap_dmbulk))
- return PTR_ERR(db->regmap_dmbulk);
-
- return 0;
+ return PTR_ERR_OR_ZERO(db->regmap_dmbulk);
}
static int dm9051_map_chipid(struct board_info *db)
@@ -1161,9 +1158,7 @@ static int dm9051_phy_connect(struct board_info *db)
db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change,
PHY_INTERFACE_MODE_MII);
- if (IS_ERR(db->phydev))
- return PTR_ERR_OR_ZERO(db->phydev);
- return 0;
+ return PTR_ERR_OR_ZERO(db->phydev);
}
static int dm9051_probe(struct spi_device *spi)
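
PTR_ERR_OR_ZERO() collapses the tail of both dm9051 helpers: return the encoded errno when the pointer is an error, otherwise 0. Its behaviour, paraphrased from include/linux/err.h in terms of the helpers sketched earlier:

	static inline int PTR_ERR_OR_ZERO(const void *ptr)
	{
		return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
	}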
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0616b5fe241c..ad862ed7888a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4986,9 +4986,6 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 11b29f56aaf9..6e14c918e3fb 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -14,6 +14,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/miscdevice.h>
+#include <net/xdp.h>
#define TSNEP "tsnep"
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 84751bb303a6..f61bd89734c5 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -28,6 +28,7 @@
#include <linux/iopoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
#include <net/xdp_sock_drv.h>
#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
@@ -1333,7 +1334,7 @@ static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
skb = tsnep_build_skb(rx, page, length);
if (skb) {
- page_pool_release_page(rx->page_pool, page);
+ skb_mark_for_recycle(skb);
rx->packets++;
rx->bytes += length;
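
The tsnep conversion drops page_pool_release_page() in favour of keeping the page attached and calling skb_mark_for_recycle(), so pages flow back to the pool when the skb is consumed. A hedged sketch of the pattern for a page_pool backed RX path (names are illustrative, not tsnep's exact code):

	skb = build_skb(page_address(page), PAGE_SIZE);	/* hypothetical buffer layout */
	if (skb) {
		skb_reserve(skb, headroom);		/* hypothetical headroom */
		skb_put(skb, length);
		skb_mark_for_recycle(skb);		/* recycle via page_pool on free */
	}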
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f1eb660aaee2..edf000e7bab4 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -6,10 +6,9 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include "nps_enet.h"
#define DRV_NAME "nps_mgt_enet"
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a03879a27b04..9135b918dd49 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -177,16 +177,20 @@ static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
-static void ftgmac100_initial_mac(struct ftgmac100 *priv)
+static int ftgmac100_initial_mac(struct ftgmac100 *priv)
{
u8 mac[ETH_ALEN];
unsigned int m;
unsigned int l;
+ int err;
- if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
+ err = of_get_ethdev_address(priv->dev->of_node, priv->netdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (!err) {
dev_info(priv->dev, "Read MAC address %pM from device tree\n",
priv->netdev->dev_addr);
- return;
+ return 0;
}
m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
@@ -207,6 +211,8 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
dev_info(priv->dev, "Generated random MAC address %pM\n",
priv->netdev->dev_addr);
}
+
+ return 0;
}
static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
@@ -1843,7 +1849,9 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->aneg_pause = true;
/* MAC address from chip or random one */
- ftgmac100_initial_mac(priv);
+ err = ftgmac100_initial_mac(priv);
+ if (err)
+ goto err_phy_connect;
np = pdev->dev.of_node;
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 139fe66f8bcd..183069581bc0 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -149,6 +149,40 @@ static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
}
+static void ftmac100_setup_mc_ht(struct ftmac100 *priv)
+{
+ struct netdev_hw_addr *ha;
+ u64 maht = 0; /* Multicast Address Hash Table */
+
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 hash = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ maht |= BIT_ULL(hash);
+ }
+
+ iowrite32(lower_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT0);
+ iowrite32(upper_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT1);
+}
+
+static void ftmac100_set_rx_bits(struct ftmac100 *priv, unsigned int *maccr)
+{
+ struct net_device *netdev = priv->netdev;
+
+ /* Clear all */
+ *maccr &= ~(FTMAC100_MACCR_RCV_ALL | FTMAC100_MACCR_RX_MULTIPKT |
+ FTMAC100_MACCR_HT_MULTI_EN);
+
+ /* Set the requested bits */
+ if (netdev->flags & IFF_PROMISC)
+ *maccr |= FTMAC100_MACCR_RCV_ALL;
+ if (netdev->flags & IFF_ALLMULTI)
+ *maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+ else if (netdev_mc_count(netdev)) {
+ *maccr |= FTMAC100_MACCR_HT_MULTI_EN;
+ ftmac100_setup_mc_ht(priv);
+ }
+}
+
#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
FTMAC100_MACCR_RCV_EN | \
FTMAC100_MACCR_XDMA_EN | \
@@ -182,11 +216,7 @@ static int ftmac100_start_hw(struct ftmac100 *priv)
if (netdev->mtu > ETH_DATA_LEN)
maccr |= FTMAC100_MACCR_RX_FTL;
- /* Add other bits as needed */
- if (netdev->flags & IFF_PROMISC)
- maccr |= FTMAC100_MACCR_RCV_ALL;
- if (netdev->flags & IFF_ALLMULTI)
- maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+ ftmac100_set_rx_bits(priv, &maccr);
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
return 0;
@@ -1067,6 +1097,15 @@ static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
+static void ftmac100_set_rx_mode(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+
+ ftmac100_set_rx_bits(priv, &maccr);
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
+}
+
static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_open = ftmac100_open,
.ndo_stop = ftmac100_stop,
@@ -1075,6 +1114,7 @@ static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = ftmac100_do_ioctl,
.ndo_change_mtu = ftmac100_change_mtu,
+ .ndo_set_rx_mode = ftmac100_set_rx_mode,
};
/******************************************************************************
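
The new ftmac100_setup_mc_ht() builds a 64-entry hash filter: the top six bits of ether_crc() over each multicast address select one bit in a 64-bit table whose halves land in MAHT0/MAHT1. A standalone sketch of the table-building step, assuming hash has already been computed as ether_crc(ETH_ALEN, addr) >> 26:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t maht = 0;			/* Multicast Address Hash Table */
		unsigned int hash = 0x2a;		/* hypothetical 6-bit hash value */

		maht |= 1ULL << hash;			/* BIT_ULL(hash) */
		printf("MAHT0=%08x MAHT1=%08x\n",
		       (uint32_t)(maht & 0xffffffff),	/* lower_32_bits() */
		       (uint32_t)(maht >> 32));		/* upper_32_bits() */
		return 0;
	}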
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 431f8917dc39..dcbc598b11c6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -7,8 +7,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
@@ -17,6 +17,7 @@
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/platform_device.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
@@ -3497,7 +3498,7 @@ free_netdev:
return err;
}
-static int dpaa_remove(struct platform_device *pdev)
+static void dpaa_remove(struct platform_device *pdev)
{
struct net_device *net_dev;
struct dpaa_priv *priv;
@@ -3516,6 +3517,9 @@ static int dpaa_remove(struct platform_device *pdev)
phylink_destroy(priv->mac_dev->phylink);
err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
+ if (err)
+ dev_err(dev, "Failed to free FQs on remove (%pE)\n",
+ ERR_PTR(err));
qman_delete_cgr_safe(&priv->ingress_cgr);
qman_release_cgrid(priv->ingress_cgr.cgrid);
@@ -3527,8 +3531,6 @@ static int dpaa_remove(struct platform_device *pdev)
dpaa_bps_free(priv);
free_netdev(net_dev);
-
- return err;
}
static const struct platform_device_id dpaa_devtype[] = {
@@ -3546,7 +3548,7 @@ static struct platform_driver dpaa_driver = {
},
.id_table = dpaa_devtype,
.probe = dpaa_eth_probe,
- .remove = dpaa_remove
+ .remove_new = dpaa_remove
};
static int __init dpaa_load(void)
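
dpaa_remove() moves to the void-returning platform remove callback: the driver core only warned about and then ignored the old int return, so failures are logged locally (here the dpaa_fq_free() result) and the driver hooks in via .remove_new. A minimal sketch of the convention with hypothetical names:

	static void foo_remove(struct platform_device *pdev)
	{
		if (foo_teardown(pdev))			/* hypothetical helper */
			dev_err(&pdev->dev, "teardown failed\n");
	}

	static struct platform_driver foo_driver = {
		.driver		= { .name = "foo" },
		.probe		= foo_probe,		/* hypothetical */
		.remove_new	= foo_remove,
	};

The same conversion is applied below to fec, fec_mpc52xx and the mpc52xx MDIO driver.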
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 35b8cea7f886..ac3c8ed57bbe 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/refcount.h>
+#include <net/xdp.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 9c71cbbb13d8..5bd0b36d1feb 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -6,7 +6,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/string.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index a9676d0dece8..15bab41cee48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -5087,7 +5087,6 @@ MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
static struct fsl_mc_driver dpaa2_eth_driver = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = dpaa2_eth_probe,
.remove = dpaa2_eth_remove,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index d56d7a13262e..bfb6c96c3b2f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -12,6 +12,7 @@
#include <linux/fsl/mc.h>
#include <linux/net_tstamp.h>
#include <net/devlink.h>
+#include <net/xdp.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index c39b866e2582..4798fb7fe35d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -17,14 +17,14 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
struct dpsw_acl_fields *acl_h, *acl_m;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported keys used");
return -EOPNOTSUPP;
@@ -539,9 +539,9 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
int ret = -EOPNOTSUPP;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(extack,
"Mirroring is supported only per VLAN");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 21cc4e52425a..97d3151076d5 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -3457,7 +3457,6 @@ MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
static struct fsl_mc_driver dpaa2_switch_drv = {
.driver = {
.name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
},
.probe = dpaa2_switch_probe,
.remove = dpaa2_switch_remove,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 8577cf7699a0..7439739cd81a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -11,6 +11,7 @@
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
+#include <net/xdp.h>
#include "enetc_hw.h"
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index b307bef4dc29..d39617ab9306 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -18,8 +18,8 @@
*/
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include "enetc.h"
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 270cbd5e8684..2513b44056c1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -483,13 +483,13 @@ struct enetc_psfp {
static struct actions_fwd enetc_act_fwd[] = {
{
BIT(FLOW_ACTION_GATE),
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
FILTER_ACTION_TYPE_PSFP
},
{
BIT(FLOW_ACTION_POLICE) |
BIT(FLOW_ACTION_GATE),
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS),
FILTER_ACTION_TYPE_PSFP
},
/* example for ACL actions */
@@ -1069,8 +1069,8 @@ revert_sid:
return err;
}
-static struct actions_fwd *enetc_check_flow_actions(u64 acts,
- unsigned int inputkeys)
+static struct actions_fwd *
+enetc_check_flow_actions(u64 acts, unsigned long long inputkeys)
{
int i;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 63a053dea819..a8fbcada6b01 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -22,6 +22,7 @@
#include <linux/timecounter.h>
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/firmware/imx/sci.h>
+#include <net/xdp.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
@@ -547,13 +548,11 @@ enum {
enum fec_txbuf_type {
FEC_TXBUF_T_SKB,
FEC_TXBUF_T_XDP_NDO,
+ FEC_TXBUF_T_XDP_TX,
};
struct fec_tx_buffer {
- union {
- struct sk_buff *skb;
- struct xdp_frame *xdp;
- };
+ void *buf_p;
enum fec_txbuf_type type;
};
@@ -651,12 +650,9 @@ struct fec_enet_private {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
- unsigned long last_overflow_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
- int rx_hwtstamp_filter;
- u32 base_incval;
u32 cycle_speed;
int hwts_rx_en;
int hwts_tx_en;
@@ -679,8 +675,6 @@ struct fec_enet_private {
struct ethtool_eee eee;
unsigned int clk_ref_rate;
- u32 rx_copybreak;
-
/* ptp clock period in ns*/
unsigned int ptp_inc;
@@ -703,9 +697,9 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
-void fec_ptp_disable_hwts(struct net_device *ndev);
-int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
-int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 66b5cbdb43b9..77c8e9cfb445 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -38,6 +38,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
+#include <net/page_pool/helpers.h>
#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
@@ -68,6 +69,7 @@
#include <soc/imx/cpuidle.h>
#include <linux/filter.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <asm/cacheflush.h>
@@ -75,6 +77,9 @@
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+ int cpu, struct xdp_buff *xdp,
+ u32 dma_sync_len);
#define DRIVER_NAME "fec"
@@ -325,8 +330,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
-#define COPYBREAK_DEFAULT 256
-
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS 100
#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
@@ -397,7 +400,7 @@ static void fec_dump(struct net_device *ndev)
fec16_to_cpu(bdp->cbd_sc),
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
- txq->tx_buf[index].skb);
+ txq->tx_buf[index].buf_p);
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
index++;
} while (bdp != txq->bd.base);
@@ -654,7 +657,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
index = fec_enet_get_bd_index(last_bdp, &txq->bd);
/* Save skb pointer */
- txq->tx_buf[index].skb = skb;
+ txq->tx_buf[index].buf_p = skb;
/* Make sure the updates to rest of the descriptor are performed before
* transferring ownership.
@@ -860,7 +863,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
}
/* Save skb pointer */
- txq->tx_buf[index].skb = skb;
+ txq->tx_buf[index].buf_p = skb;
skb_tx_timestamp(skb);
txq->bd.cur = bdp;
@@ -957,26 +960,27 @@ static void fec_enet_bd_init(struct net_device *dev)
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
- if (txq->tx_buf[i].skb) {
- dev_kfree_skb_any(txq->tx_buf[i].skb);
- txq->tx_buf[i].skb = NULL;
- }
- } else {
+ if (txq->tx_buf[i].buf_p)
+ dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+ } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
if (bdp->cbd_bufaddr)
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
- if (txq->tx_buf[i].xdp) {
- xdp_return_frame(txq->tx_buf[i].xdp);
- txq->tx_buf[i].xdp = NULL;
- }
+ if (txq->tx_buf[i].buf_p)
+ xdp_return_frame(txq->tx_buf[i].buf_p);
+ } else {
+ struct page *page = txq->tx_buf[i].buf_p;
- /* restore default tx buffer type: FEC_TXBUF_T_SKB */
- txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+ if (page)
+ page_pool_put_page(page->pp, page, 0, false);
}
+ txq->tx_buf[i].buf_p = NULL;
+ /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
bdp->cbd_bufaddr = cpu_to_fec32(0);
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
}
@@ -1383,6 +1387,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
struct netdev_queue *nq;
int index = 0;
int entries_free;
+ struct page *page;
+ int frame_len;
fep = netdev_priv(ndev);
@@ -1404,8 +1410,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- skb = txq->tx_buf[index].skb;
- txq->tx_buf[index].skb = NULL;
+ skb = txq->tx_buf[index].buf_p;
if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
@@ -1424,17 +1429,24 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
if (unlikely(!budget))
break;
- xdpf = txq->tx_buf[index].xdp;
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
+ if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+ xdpf = txq->tx_buf[index].buf_p;
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ } else {
+ page = txq->tx_buf[index].buf_p;
+ }
+
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!xdpf) {
+ if (unlikely(!txq->tx_buf[index].buf_p)) {
txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
goto tx_buf_done;
}
+
+ frame_len = fec16_to_cpu(bdp->cbd_datlen);
}
/* Check for errors. */
@@ -1458,7 +1470,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
ndev->stats.tx_bytes += skb->len;
else
- ndev->stats.tx_bytes += xdpf->len;
+ ndev->stats.tx_bytes += frame_len;
}
/* Deferred means some collisions occurred during transmit,
@@ -1482,15 +1494,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
}
/* Free the sk buffer associated with this last transmit */
- dev_kfree_skb_any(skb);
- } else {
- xdp_return_frame(xdpf);
-
- txq->tx_buf[index].xdp = NULL;
- /* restore default tx buffer type: FEC_TXBUF_T_SKB */
- txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+ napi_consume_skb(skb, budget);
+ } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame_rx_napi(xdpf);
+ } else { /* recycle pages of XDP_TX frames */
+ /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
+ page_pool_put_page(page->pp, page, 0, true);
}
+ txq->tx_buf[index].buf_p = NULL;
+ /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+ txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+
tx_buf_done:
/* Make sure the update to bdp and tx_buf are performed
* before dirty_tx
@@ -1543,7 +1558,7 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+ struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
unsigned int sync, len = xdp->data_end - xdp->data;
u32 ret = FEC_ENET_XDP_PASS;
@@ -1553,8 +1568,10 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
act = bpf_prog_run_xdp(prog, xdp);
- /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
- sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+ /* Due xdp_adjust_tail and xdp_adjust_head: DMA sync for_device cover
+ * max len CPU touch
+ */
+ sync = xdp->data_end - xdp->data;
sync = max(sync, len);
switch (act) {
@@ -1566,31 +1583,38 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
case XDP_REDIRECT:
rxq->stats[RX_XDP_REDIRECT]++;
err = xdp_do_redirect(fep->netdev, xdp, prog);
- if (!err) {
- ret = FEC_ENET_XDP_REDIR;
- } else {
- ret = FEC_ENET_XDP_CONSUMED;
- page = virt_to_head_page(xdp->data);
- page_pool_put_page(rxq->page_pool, page, sync, true);
+ if (unlikely(err))
+ goto xdp_err;
+
+ ret = FEC_ENET_XDP_REDIR;
+ break;
+
+ case XDP_TX:
+ rxq->stats[RX_XDP_TX]++;
+ err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
+ if (unlikely(err)) {
+ rxq->stats[RX_XDP_TX_ERRORS]++;
+ goto xdp_err;
}
+
+ ret = FEC_ENET_XDP_TX;
break;
default:
bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
fallthrough;
- case XDP_TX:
- bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
- fallthrough;
-
case XDP_ABORTED:
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
rxq->stats[RX_XDP_DROP]++;
+xdp_err:
ret = FEC_ENET_XDP_CONSUMED;
page = virt_to_head_page(xdp->data);
page_pool_put_page(rxq->page_pool, page, sync, true);
+ if (act != XDP_DROP)
+ trace_xdp_exception(fep->netdev, prog, act);
break;
}
@@ -1621,6 +1645,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
u32 ret, xdp_result = FEC_ENET_XDP_PASS;
u32 data_start = FEC_ENET_XDP_HEADROOM;
+ int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
u32 sub_len = 4;
@@ -1699,7 +1724,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* subtract 16bit shift and FCS */
xdp_prepare_buff(&xdp, page_address(page),
data_start, pkt_len - sub_len, false);
- ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+ ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
xdp_result |= ret;
if (ret != FEC_ENET_XDP_PASS)
goto rx_processing_done;
@@ -3059,44 +3084,6 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-static int fec_enet_get_tunable(struct net_device *netdev,
- const struct ethtool_tunable *tuna,
- void *data)
-{
- struct fec_enet_private *fep = netdev_priv(netdev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = fep->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int fec_enet_set_tunable(struct net_device *netdev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct fec_enet_private *fep = netdev_priv(netdev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- fep->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
/* LPI Sleep Ts count base on tx clk (clk_ref).
* The lpi sleep cnt value = X us / (cycle_ns).
*/
@@ -3234,8 +3221,6 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_sset_count = fec_enet_get_sset_count,
#endif
.get_ts_info = fec_enet_get_ts_info,
- .get_tunable = fec_enet_get_tunable,
- .set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
.get_eee = fec_enet_get_eee,
@@ -3245,38 +3230,10 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.self_test = net_selftest,
};
-static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- if (fep->bufdesc_ex) {
- bool use_fec_hwts = !phy_has_hwtstamp(phydev);
-
- if (cmd == SIOCSHWTSTAMP) {
- if (use_fec_hwts)
- return fec_ptp_set(ndev, rq);
- fec_ptp_disable_hwts(ndev);
- } else if (cmd == SIOCGHWTSTAMP) {
- if (use_fec_hwts)
- return fec_ptp_get(ndev, rq);
- }
- }
-
- return phy_mii_ioctl(phydev, rq, cmd);
-}
-
static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
- struct sk_buff *skb;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
@@ -3301,18 +3258,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
+ if (!txq->tx_buf[i].buf_p) {
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+ continue;
+ }
+
if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
- skb = txq->tx_buf[i].skb;
- txq->tx_buf[i].skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb(txq->tx_buf[i].buf_p);
+ } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame(txq->tx_buf[i].buf_p);
} else {
- if (txq->tx_buf[i].xdp) {
- xdp_return_frame(txq->tx_buf[i].xdp);
- txq->tx_buf[i].xdp = NULL;
- }
+ struct page *page = txq->tx_buf[i].buf_p;
- txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+ page_pool_put_page(page->pp, page, 0, false);
}
+
+ txq->tx_buf[i].buf_p = NULL;
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
}
}
}
@@ -3835,12 +3797,14 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
struct fec_enet_priv_tx_q *txq,
- struct xdp_frame *frame)
+ void *frame, u32 dma_sync_len,
+ bool ndo_xmit)
{
unsigned int index, status, estatus;
struct bufdesc *bdp;
dma_addr_t dma_addr;
int entries_free;
+ u16 frame_len;
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -3855,17 +3819,37 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
index = fec_enet_get_bd_index(bdp, &txq->bd);
- dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
- frame->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, dma_addr))
- return -ENOMEM;
+ if (ndo_xmit) {
+ struct xdp_frame *xdpf = frame;
+
+ dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+ return -ENOMEM;
+
+ frame_len = xdpf->len;
+ txq->tx_buf[index].buf_p = xdpf;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+ } else {
+ struct xdp_buff *xdpb = frame;
+ struct page *page;
+
+ page = virt_to_page(xdpb->data);
+ dma_addr = page_pool_get_dma_addr(page) +
+ (xdpb->data - xdpb->data_hard_start);
+ dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
+ dma_sync_len, DMA_BIDIRECTIONAL);
+ frame_len = xdpb->data_end - xdpb->data;
+ txq->tx_buf[index].buf_p = page;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
+ }
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
if (fep->bufdesc_ex)
estatus = BD_ENET_TX_INT;
bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
- bdp->cbd_datlen = cpu_to_fec16(frame->len);
+ bdp->cbd_datlen = cpu_to_fec16(frame_len);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -3877,9 +3861,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
- txq->tx_buf[index].xdp = frame;
-
/* Make sure the updates to rest of the descriptor are performed before
* transferring ownership.
*/
@@ -3905,6 +3886,29 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
return 0;
}
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+ int cpu, struct xdp_buff *xdp,
+ u32 dma_sync_len)
+{
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
+ int queue, ret;
+
+ queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+
+ __netif_tx_lock(nq, cpu);
+
+ /* Avoid tx timeout as XDP shares the queue with kernel stack */
+ txq_trans_cond_update(nq);
+ ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
+
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
static int fec_enet_xdp_xmit(struct net_device *dev,
int num_frames,
struct xdp_frame **frames,
@@ -3927,7 +3931,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
/* Avoid tx timeout as XDP shares the queue with kernel stack */
txq_trans_cond_update(nq);
for (i = 0; i < num_frames; i++) {
- if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+ if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
break;
sent_frames++;
}
@@ -3937,6 +3941,37 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
return sent_frames;
}
+static int fec_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!fep->bufdesc_ex)
+ return -EOPNOTSUPP;
+
+ fec_ptp_get(ndev, config);
+
+ return 0;
+}
+
+static int fec_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!fep->bufdesc_ex)
+ return -EOPNOTSUPP;
+
+ return fec_ptp_set(ndev, config, extack);
+}
+
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -3946,13 +3981,15 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
- .ndo_eth_ioctl = fec_enet_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = fec_poll_controller,
#endif
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
+ .ndo_hwtstamp_get = fec_hwtstamp_get,
+ .ndo_hwtstamp_set = fec_hwtstamp_set,
};
static const unsigned short offset_des_active_rxq[] = {
@@ -4018,9 +4055,6 @@ static int fec_enet_init(struct net_device *ndev)
if (ret)
goto free_queue_mem;
- /* make sure MAC we just acquired is programmed into the hw */
- fec_set_mac_address(ndev, NULL);
-
/* Set receive and transmit descriptor base. */
for (i = 0; i < fep->num_rx_queues; i++) {
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
@@ -4486,7 +4520,6 @@ fec_probe(struct platform_device *pdev)
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
- fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
pm_runtime_mark_last_busy(&pdev->dev);
@@ -4526,7 +4559,7 @@ failed_ioremap:
return ret;
}
-static int
+static void
fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -4562,7 +4595,6 @@ fec_drv_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
- return 0;
}
static int __maybe_unused fec_suspend(struct device *dev)
@@ -4718,7 +4750,7 @@ static struct platform_driver fec_driver = {
},
.id_table = fec_devtype,
.probe = fec_probe,
- .remove = fec_drv_remove,
+ .remove_new = fec_drv_remove,
};
module_platform_driver(fec_driver);
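
The fec ioctl handler disappears because hardware timestamping now reaches the driver through dedicated net_device_ops hooks taking a kernel_hwtstamp_config, while phy_do_ioctl_running() covers the remaining MII ioctls. A hedged sketch of the shape of those hooks (hypothetical driver, not fec's exact code):

	static int foo_hwtstamp_get(struct net_device *ndev,
				    struct kernel_hwtstamp_config *config)
	{
		/* fill *config from the driver's saved state */
		return 0;
	}

	static int foo_hwtstamp_set(struct net_device *ndev,
				    struct kernel_hwtstamp_config *config,
				    struct netlink_ext_ack *extack)
	{
		/* validate *config and program the hardware */
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_eth_ioctl	  = phy_do_ioctl_running,
		.ndo_hwtstamp_get = foo_hwtstamp_get,
		.ndo_hwtstamp_set = foo_hwtstamp_set,
	};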
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b88816b71ddf..ebae71ec26c6 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,12 +29,12 @@
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -974,7 +974,7 @@ err_netdev:
return rv;
}
-static int
+static void
mpc52xx_fec_remove(struct platform_device *op)
{
struct net_device *ndev;
@@ -998,8 +998,6 @@ mpc52xx_fec_remove(struct platform_device *op)
release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1042,7 +1040,7 @@ static struct platform_driver mpc52xx_fec_driver = {
.of_match_table = mpc52xx_fec_match,
},
.probe = mpc52xx_fec_probe,
- .remove = mpc52xx_fec_remove,
+ .remove_new = mpc52xx_fec_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_fec_of_suspend,
.resume = mpc52xx_fec_of_resume,
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 95f778cce98c..39689826cc8f 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -13,10 +13,11 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
-#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
#include "fec_mpc52xx.h"
@@ -117,7 +118,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
return err;
}
-static int mpc52xx_fec_mdio_remove(struct platform_device *of)
+static void mpc52xx_fec_mdio_remove(struct platform_device *of)
{
struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
@@ -126,8 +127,6 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of)
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id mpc52xx_fec_mdio_match[] = {
@@ -145,7 +144,7 @@ struct platform_driver mpc52xx_fec_mdio_driver = {
.of_match_table = mpc52xx_fec_mdio_match,
},
.probe = mpc52xx_fec_mdio_probe,
- .remove = mpc52xx_fec_mdio_remove,
+ .remove_new = mpc52xx_fec_mdio_remove,
};
/* let fec driver call it, since this has to be registered before it */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index ab86bb8562ef..181d9bfbee22 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -30,7 +30,6 @@
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
@@ -443,21 +442,21 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct fec_enet_private *adapter =
+ struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
unsigned long flags;
- mutex_lock(&adapter->ptp_clk_mutex);
+ mutex_lock(&fep->ptp_clk_mutex);
/* Check the ptp clock */
- if (!adapter->ptp_clk_on) {
- mutex_unlock(&adapter->ptp_clk_mutex);
+ if (!fep->ptp_clk_on) {
+ mutex_unlock(&fep->ptp_clk_mutex);
return -EINVAL;
}
- spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->tc);
- spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- mutex_unlock(&adapter->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@ -606,28 +605,12 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
}
}
-/**
- * fec_ptp_disable_hwts - disable hardware time stamping
- * @ndev: pointer to net_device
- */
-void fec_ptp_disable_hwts(struct net_device *ndev)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- fep->hwts_tx_en = 0;
- fep->hwts_rx_en = 0;
-}
-
-int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
+int fec_ptp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
fep->hwts_tx_en = 0;
break;
@@ -638,33 +621,28 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
fep->hwts_rx_en = 0;
break;
default:
fep->hwts_rx_en = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+void fec_ptp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = (fep->hwts_rx_en ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ config->flags = 0;
+ config->tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
}
/*
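With the conversion above, the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls are decoded by the
networking core, which hands the driver a struct kernel_hwtstamp_config through the new
.ndo_hwtstamp_get/.ndo_hwtstamp_set callbacks; that is why the copy_from_user()/copy_to_user()
calls disappear from fec_ptp_set()/fec_ptp_get(). A minimal sketch of the same wiring for a
hypothetical driver (example_* names are placeholders):

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

static int example_hwtstamp_set(struct net_device *ndev,
				struct kernel_hwtstamp_config *config,
				struct netlink_ext_ack *extack)
{
	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (config->rx_filter != HWTSTAMP_FILTER_NONE)
		config->rx_filter = HWTSTAMP_FILTER_ALL;

	/* program the timestamping hardware here */
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_hwtstamp_set = example_hwtstamp_set,
	/* other callbacks omitted */
};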
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 9d85fb136e34..d96028f01770 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/fsl/guts.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ab90fe2bee5e..406e75e9e5ea 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 43665806c590..9767586b4eb3 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -18,6 +18,7 @@
#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
+#include <linux/platform_device.h>
#include "mac.h"
#include "fman_mac.h"
@@ -331,12 +332,11 @@ _return_of_node_put:
return err;
}
-static int mac_remove(struct platform_device *pdev)
+static void mac_remove(struct platform_device *pdev)
{
struct mac_device *mac_dev = platform_get_drvdata(pdev);
platform_device_unregister(mac_dev->priv->eth_dev);
- return 0;
}
static struct platform_driver mac_driver = {
@@ -345,7 +345,7 @@ static struct platform_driver mac_driver = {
.of_match_table = mac_match,
},
.probe = mac_probe,
- .remove = mac_remove,
+ .remove_new = mac_remove,
};
builtin_platform_driver(mac_driver);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index ad06f8d7924b..fe747915cc73 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -68,10 +68,6 @@ struct dpaa_eth_data {
extern const char *mac_driver_description;
-int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
-
-void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
- bool *tx_pause);
int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev);
#endif /* __MAC_H */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 8844a9a04fcf..a6dfc8807d3d 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -318,14 +318,12 @@ fs_enet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct fs_enet_private *fep;
- const struct fs_platform_info *fpi;
u32 int_events;
u32 int_clr_events;
int nr, napi_ok;
int handled;
fep = netdev_priv(dev);
- fpi = fep->fpi;
nr = 0;
while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
@@ -1051,7 +1049,7 @@ out_free_fpi:
return ret;
}
-static int fs_enet_remove(struct platform_device *ofdev)
+static void fs_enet_remove(struct platform_device *ofdev)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct fs_enet_private *fep = netdev_priv(ndev);
@@ -1066,7 +1064,6 @@ static int fs_enet_remove(struct platform_device *ofdev)
if (of_phy_is_fixed_link(ofdev->dev.of_node))
of_phy_deregister_fixed_link(ofdev->dev.of_node);
free_netdev(ndev);
- return 0;
}
static const struct of_device_id fs_enet_match[] = {
@@ -1113,7 +1110,7 @@ static struct platform_driver fs_enet_driver = {
.of_match_table = fs_enet_match,
},
.probe = fs_enet_probe,
- .remove = fs_enet_remove,
+ .remove_new = fs_enet_remove,
};
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index cb419aef8d1b..759bb7080e22 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -2,6 +2,7 @@
#ifndef FS_ENET_H
#define FS_ENET_H
+#include <linux/clk.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/types.h>
@@ -9,7 +10,6 @@
#include <linux/phy.h>
#include <linux/dma-mapping.h>
-#include <linux/fs_enet_pd.h>
#include <asm/fs_pd.h>
#ifdef CONFIG_CPM1
@@ -118,6 +118,23 @@ struct phy_info {
#define ENET_RX_ALIGN 16
#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
+struct fs_platform_info {
+ /* device specific information */
+ u32 cp_command; /* CPM page/sblock/mcn */
+
+ u32 dpram_offset;
+
+ struct device_node *phy_node;
+
+	int rx_ring, tx_ring; /* number of rx and tx ring buffers */
+	int rx_copybreak; /* copy small frames below this limit */
+ int napi_weight; /* NAPI weight */
+
+ int use_rmii; /* use RMII mode */
+
+ struct clk *clk_per; /* 'per' clock for register access */
+};
+
struct fs_enet_private {
struct napi_struct napi;
struct device *dev; /* pointer back to the device (must be initialized first) */
@@ -192,11 +209,6 @@ void fs_cleanup_bds(struct net_device *dev);
#define PFX DRV_MODULE_NAME ": "
/***************************************************************************/
-
-int fs_enet_platform_init(void);
-void fs_enet_platform_cleanup(void);
-
-/***************************************************************************/
/* buffer descriptor access macros */
/* access macros */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index b47490be872c..d903a9012db0 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -32,7 +32,6 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>
#include <linux/pgtable.h>
@@ -106,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
goto out_ep;
fep->fcc.mem = (void __iomem *)cpm2_immr;
- fpi->dpram_offset = cpm_dpalloc(128, 32);
+ fpi->dpram_offset = cpm_muram_alloc(128, 32);
if (IS_ERR_VALUE(fpi->dpram_offset)) {
ret = fpi->dpram_offset;
goto out_fcccp;
@@ -548,7 +547,7 @@ static void tx_restart(struct net_device *dev)
}
/* Now update the TBPTR and dirty flag to the current buffer */
W32(ep, fen_genfcc.fcc_tbptr,
- (uint) (((void *)recheck_bd - fep->ring_base) +
+ (uint)(((void __iomem *)recheck_bd - fep->ring_base) +
fep->ring_mem_addr));
fep->dirty_tx = recheck_bd;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 61f4b6e50d29..cdc89d83cf07 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -32,7 +32,6 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>
@@ -340,11 +339,7 @@ static void restart(struct net_device *dev)
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
struct fec __iomem *fecp = fep->fec.fecp;
-
- struct fec_info *feci = dev->phydev->mdio.bus->priv;
-
int i;
if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
@@ -364,16 +359,6 @@ static void stop(struct net_device *dev)
FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
fs_cleanup_bds(dev);
-
- /* shut down FEC1? that's where the mii bus is */
- if (fpi->has_phy) {
- FS(fecp, r_cntrl, fpi->use_rmii ?
- FEC_RCNTRL_RMII_MODE :
- FEC_RCNTRL_MII_MODE); /* MII/RMII enable */
- FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
- FW(fecp, ievent, FEC_ENET_MII);
- FW(fecp, mii_speed, feci->mii_speed);
- }
}
static void napi_clear_event_fs(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 64300ac13e02..a64cb6270515 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -32,7 +32,6 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
@@ -134,13 +133,13 @@ static int allocate_bd(struct net_device *dev)
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
- fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
- sizeof(cbd_t), 8);
+ fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
if (IS_ERR_VALUE(fep->ring_mem_addr))
return -ENOMEM;
fep->ring_base = (void __iomem __force*)
- cpm_dpram_addr(fep->ring_mem_addr);
+ cpm_muram_addr(fep->ring_mem_addr);
return 0;
}
@@ -150,7 +149,7 @@ static void free_bd(struct net_device *dev)
struct fs_enet_private *fep = netdev_priv(dev);
if (fep->ring_base)
- cpm_dpfree(fep->ring_mem_addr);
+ cpm_muram_free(fep->ring_mem_addr);
}
static void cleanup_data(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 21de56345503..f965a2329055 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -29,8 +29,8 @@
struct bb_info {
struct mdiobb_ctrl ctrl;
- __be32 __iomem *dir;
- __be32 __iomem *dat;
+ u32 __iomem *dir;
+ u32 __iomem *dat;
u32 mdio_msk;
u32 mdc_msk;
};
@@ -192,7 +192,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct platform_device *ofdev)
+static void fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = platform_get_drvdata(ofdev);
struct bb_info *bitbang = bus->priv;
@@ -201,8 +201,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
free_mdio_bitbang(bus);
iounmap(bitbang->dir);
kfree(bitbang);
-
- return 0;
}
static const struct of_device_id fs_enet_mdio_bb_match[] = {
@@ -219,7 +217,7 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.of_match_table = fs_enet_mdio_bb_match,
},
.probe = fs_enet_mdio_probe,
- .remove = fs_enet_mdio_remove,
+ .remove_new = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 59a8f0bd0f5c..a1e777a4b75f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -31,6 +31,7 @@
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>
@@ -187,7 +188,7 @@ out:
return ret;
}
-static int fs_enet_mdio_remove(struct platform_device *ofdev)
+static void fs_enet_mdio_remove(struct platform_device *ofdev)
{
struct mii_bus *bus = platform_get_drvdata(ofdev);
struct fec_info *fec = bus->priv;
@@ -196,8 +197,6 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev)
iounmap(fec->fecp);
kfree(fec);
mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id fs_enet_mdio_fec_match[] = {
@@ -220,7 +219,7 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.of_match_table = fs_enet_mdio_fec_match,
},
.probe = fs_enet_mdio_probe,
- .remove = fs_enet_mdio_remove,
+ .remove_new = fs_enet_mdio_remove,
};
module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 9d58d8334467..eee675a25b2c 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -12,6 +12,7 @@
*/
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -511,7 +512,7 @@ error:
}
-static int fsl_pq_mdio_remove(struct platform_device *pdev)
+static void fsl_pq_mdio_remove(struct platform_device *pdev)
{
struct device *device = &pdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
@@ -521,8 +522,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
iounmap(priv->map);
mdiobus_free(bus);
-
- return 0;
}
static struct platform_driver fsl_pq_mdio_driver = {
@@ -531,7 +530,7 @@ static struct platform_driver fsl_pq_mdio_driver = {
.of_match_table = fsl_pq_mdio_match,
},
.probe = fsl_pq_mdio_probe,
- .remove = fsl_pq_mdio_remove,
+ .remove_new = fsl_pq_mdio_remove,
};
module_platform_driver(fsl_pq_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 38d5013c6fed..e3dfbd7a4236 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -60,6 +60,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
@@ -75,7 +76,6 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -3364,7 +3364,7 @@ register_fail:
return err;
}
-static int gfar_remove(struct platform_device *ofdev)
+static void gfar_remove(struct platform_device *ofdev)
{
struct gfar_private *priv = platform_get_drvdata(ofdev);
struct device_node *np = ofdev->dev.of_node;
@@ -3381,8 +3381,6 @@ static int gfar_remove(struct platform_device *ofdev)
gfar_free_rx_queues(priv);
gfar_free_tx_queues(priv);
free_gfar_dev(priv);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -3642,7 +3640,7 @@ static struct platform_driver gfar_driver = {
.of_match_table = gfar_match,
},
.probe = gfar_probe,
- .remove = gfar_remove,
+ .remove_new = gfar_remove,
};
module_platform_driver(gfar_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index b2b0d3c26fcc..7a15b9245698 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -38,7 +38,9 @@
#include <linux/phy.h>
#include <linux/sort.h>
#include <linux/if_vlan.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/fsl/ptp_qoriq.h>
#include "gianfar.h"
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 7a4cb4f07c32..ab421243a419 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -28,11 +28,12 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
@@ -3753,7 +3754,7 @@ err_free_info:
return err;
}
-static int ucc_geth_remove(struct platform_device* ofdev)
+static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3767,8 +3768,6 @@ static int ucc_geth_remove(struct platform_device* ofdev)
of_node_put(ugeth->ug_info->phy_node);
kfree(ugeth->ug_info);
free_netdev(dev);
-
- return 0;
}
static const struct of_device_id ucc_geth_match[] = {
@@ -3787,7 +3786,7 @@ static struct platform_driver ucc_geth_driver = {
.of_match_table = ucc_geth_match,
},
.probe = ucc_geth_probe,
- .remove = ucc_geth_remove,
+ .remove_new = ucc_geth_remove,
.suspend = ucc_geth_suspend,
.resume = ucc_geth_resume,
};
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index a13b4ba4d6e1..65dc07d0df0f 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -19,10 +19,10 @@
#include <linux/kernel.h>
#include <linux/mdio.h>
#include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
/* Number of microseconds to wait for a register to respond */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 53b7e95213a8..5eec552a1f24 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/xdp.h>
/* Tx descriptor size */
#define FUNETH_SQE_SIZE 64U
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4b425bf71ede..0d1e681be250 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
+#include <net/xdp.h>
#include "gve_desc.h"
#include "gve_desc_dqo.h"
@@ -51,6 +52,26 @@
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+#define DQO_QPL_DEFAULT_TX_PAGES 512
+#define DQO_QPL_DEFAULT_RX_PAGES 2048
+
+/* Maximum TSO size supported on DQO */
+#define GVE_DQO_TX_MAX 0x3FFFF
+
+#define GVE_TX_BUF_SHIFT_DQO 11
+
+/* 2K buffers for DQO-QPL */
+#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
+#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
+#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
+
+/* If the number of free/recyclable buffers is less than this threshold, the
+ * driver allocates and uses a non-QPL page on the DQO QPL receive path to
+ * free up buffers.
+ * The value is large enough to post at least three 64K LRO packets to the
+ * NIC via 2K buffers.
+ */
+#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
+
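A quick worked check of the new constants, assuming 4 KiB pages (the page-dependent values
scale with PAGE_SIZE on other configurations); illustrative arithmetic only:

/*
 * GVE_TX_BUF_SIZE_DQO      = 1 << 11                    = 2048 bytes
 * GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11                 = 2 buffers per QPL page
 * GVE_MAX_TX_BUFS_PER_PKT  = DIV_ROUND_UP(0x3FFFF, 2048) = 128
 * GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD:
 *     3 * 64 KiB / 2 KiB = 96, i.e. enough headroom to post three 64K LRO
 *     packets through 2K buffers, matching the comment above.
 */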
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -217,6 +238,15 @@ struct gve_rx_ring {
* which cannot be reused yet.
*/
struct gve_index_list used_buf_states;
+
+ /* qpl assigned to this queue */
+ struct gve_queue_page_list *qpl;
+
+ /* index into queue page list */
+ u32 next_qpl_page_idx;
+
+ /* track number of used buffers */
+ u16 used_buf_states_cnt;
} dqo;
};
@@ -328,8 +358,14 @@ struct gve_tx_pending_packet_dqo {
* All others correspond to `skb`'s frags and should be unmapped with
* `dma_unmap_page`.
*/
- DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
- DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
+ union {
+ struct {
+ DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+ DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
+ };
+ s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
+ };
+
u16 num_bufs;
/* Linked list index to next element in the list, or -1 if none */
@@ -384,6 +420,32 @@ struct gve_tx_ring {
* set.
*/
u32 last_re_idx;
+
+ /* free running number of packet buf descriptors posted */
+ u16 posted_packet_desc_cnt;
+ /* free running number of packet buf descriptors completed */
+ u16 completed_packet_desc_cnt;
+
+ /* QPL fields */
+ struct {
+ /* Linked list of gve_tx_buf_dqo. Index into
+ * tx_qpl_buf_next, or -1 if empty.
+ *
+ * This is a consumer list owned by the TX path. When it
+ * runs out, the producer list is stolen from the
+ * completion handling path
+ * (dqo_compl.free_tx_qpl_buf_head).
+ */
+ s16 free_tx_qpl_buf_head;
+
+ /* Free running count of the number of QPL tx buffers
+ * allocated
+ */
+ u32 alloc_tx_qpl_buf_cnt;
+
+ /* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
+ u32 free_tx_qpl_buf_cnt;
+ };
} dqo_tx;
};
@@ -427,6 +489,24 @@ struct gve_tx_ring {
* reached a specified timeout.
*/
struct gve_index_list timed_out_completions;
+
+ /* QPL fields */
+ struct {
+ /* Linked list of gve_tx_buf_dqo. Index into
+ * tx_qpl_buf_next, or -1 if empty.
+ *
+ * This is the producer list, owned by the completion
+ * handling path. When the consumer list
+		 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
+ * will be stolen.
+ */
+ atomic_t free_tx_qpl_buf_head;
+
+ /* Free running count of the number of tx buffers
+ * freed
+ */
+ atomic_t free_tx_qpl_buf_cnt;
+ };
} dqo_compl;
} ____cacheline_aligned;
u64 pkt_done; /* free-running - total packets completed */
@@ -453,6 +533,21 @@ struct gve_tx_ring {
s16 num_pending_packets;
u32 complq_mask; /* complq size is complq_mask + 1 */
+
+ /* QPL fields */
+ struct {
+ /* qpl assigned to this queue */
+ struct gve_queue_page_list *qpl;
+
+ /* Each QPL page is divided into TX bounce buffers
+ * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
+ * an array to manage linked lists of TX buffers.
+		 * An entry j at index i means that the j'th buffer
+		 * is next on the list after the i'th.
+ */
+ s16 *tx_qpl_buf_next;
+ u32 num_tx_qpl_bufs;
+ };
} dqo;
} ____cacheline_aligned;
struct netdev_queue *netdev_txq;
@@ -531,6 +626,7 @@ enum gve_queue_format {
GVE_GQI_RDA_FORMAT = 0x1,
GVE_GQI_QPL_FORMAT = 0x2,
GVE_DQO_RDA_FORMAT = 0x3,
+ GVE_DQO_QPL_FORMAT = 0x4,
};
struct gve_priv {
@@ -550,7 +646,8 @@ struct gve_priv {
u16 num_event_counters;
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
- u16 tx_pages_per_qpl; /* tx buffer length */
+	u16 tx_pages_per_qpl; /* Number of pages per TX QPL suggested by the NIC */
+	u16 rx_pages_per_qpl; /* Number of pages per RX QPL suggested by the NIC */
u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
@@ -808,11 +905,17 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
return (priv->num_ntfy_blks / 2) + queue_idx;
}
+static inline bool gve_is_qpl(struct gve_priv *priv)
+{
+ return priv->queue_format == GVE_GQI_QPL_FORMAT ||
+ priv->queue_format == GVE_DQO_QPL_FORMAT;
+}
+
/* Returns the number of tx queue page lists
*/
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ if (!gve_is_qpl(priv))
return 0;
return priv->tx_cfg.num_queues + priv->num_xdp_queues;
@@ -832,7 +935,7 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
*/
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ if (!gve_is_qpl(priv))
return 0;
return priv->rx_cfg.num_queues;
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 252974202a3f..79db7a6d42bc 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -39,7 +39,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_gqi_rda **dev_op_gqi_rda,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
- struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
+ struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -112,6 +113,22 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_dqo_rda = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_DQO_QPL:
+ if (option_length < sizeof(**dev_op_dqo_qpl) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_dqo_qpl)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
+ }
+ *dev_op_dqo_qpl = (void *)(option + 1);
+ break;
case GVE_DEV_OPT_ID_JUMBO_FRAMES:
if (option_length < sizeof(**dev_op_jumbo_frames) ||
req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
@@ -146,7 +163,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_gqi_rda **dev_op_gqi_rda,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
- struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
+ struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -166,7 +184,8 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
- dev_op_dqo_rda, dev_op_jumbo_frames);
+ dev_op_dqo_rda, dev_op_jumbo_frames,
+ dev_op_dqo_qpl);
dev_opt = next_opt;
}
@@ -505,12 +524,24 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
+ u16 comp_ring_size;
+ u32 qpl_id = 0;
+
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
+ comp_ring_size =
+ priv->options_dqo_rda.tx_comp_ring_entries;
+ } else {
+ qpl_id = tx->dqo.qpl->id;
+ comp_ring_size = priv->tx_desc_cnt;
+ }
+ cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_tx_queue.tx_ring_size =
cpu_to_be16(priv->tx_desc_cnt);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(tx->complq_bus_dqo);
cmd.create_tx_queue.tx_comp_ring_size =
- cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
+ cpu_to_be16(comp_ring_size);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -555,6 +586,18 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
+ u16 rx_buff_ring_entries;
+ u32 qpl_id = 0;
+
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
+ rx_buff_ring_entries =
+ priv->options_dqo_rda.rx_buff_ring_entries;
+ } else {
+ qpl_id = rx->dqo.qpl->id;
+ rx_buff_ring_entries = priv->rx_desc_cnt;
+ }
+ cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_rx_queue.rx_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd.create_rx_queue.rx_desc_ring_addr =
@@ -564,7 +607,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_rx_queue.packet_buffer_size =
cpu_to_be16(priv->data_buffer_size_dqo);
cmd.create_rx_queue.rx_buff_ring_size =
- cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
+ cpu_to_be16(rx_buff_ring_entries);
cmd.create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
}
@@ -675,9 +718,13 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+ priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+
+ if (priv->queue_format == GVE_DQO_QPL_FORMAT)
+ return 0;
+
priv->options_dqo_rda.tx_comp_ring_entries =
be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
- priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
priv->options_dqo_rda.rx_buff_ring_entries =
be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
@@ -687,7 +734,9 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
static void gve_enable_supported_features(struct gve_priv *priv,
u32 supported_features_mask,
const struct gve_device_option_jumbo_frames
- *dev_op_jumbo_frames)
+ *dev_op_jumbo_frames,
+ const struct gve_device_option_dqo_qpl
+ *dev_op_dqo_qpl)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -699,6 +748,18 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"JUMBO FRAMES device option enabled.\n");
priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
}
+
+	/* Override the number of pages per QPL for DQO-QPL */
+ if (dev_op_dqo_qpl) {
+ priv->tx_pages_per_qpl =
+ be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
+ priv->rx_pages_per_qpl =
+ be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
+ if (priv->tx_pages_per_qpl == 0)
+ priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
+ if (priv->rx_pages_per_qpl == 0)
+ priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
@@ -707,6 +768,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
+ struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
struct gve_device_descriptor *descriptor;
u32 supported_features_mask = 0;
union gve_adminq_command cmd;
@@ -733,13 +795,14 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
- &dev_op_jumbo_frames);
+ &dev_op_jumbo_frames,
+ &dev_op_dqo_qpl);
if (err)
goto free_device_descriptor;
/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
* is not set to GqiRda, choose the queue format in a priority order:
- * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
+ * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
*/
if (dev_op_dqo_rda) {
priv->queue_format = GVE_DQO_RDA_FORMAT;
@@ -747,7 +810,11 @@ int gve_adminq_describe_device(struct gve_priv *priv)
"Driver is running with DQO RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
- } else if (dev_op_gqi_rda) {
+ } else if (dev_op_dqo_qpl) {
+ priv->queue_format = GVE_DQO_QPL_FORMAT;
+ supported_features_mask =
+ be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
+ } else if (dev_op_gqi_rda) {
priv->queue_format = GVE_GQI_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with GQI RDA queue format.\n");
@@ -798,7 +865,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
- dev_op_jumbo_frames);
+ dev_op_jumbo_frames, dev_op_dqo_qpl);
free_device_descriptor:
dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index f894beb3deaf..38a22279e863 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -109,6 +109,14 @@ struct gve_device_option_dqo_rda {
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+struct gve_device_option_dqo_qpl {
+ __be32 supported_features_mask;
+ __be16 tx_pages_per_qpl;
+ __be16 rx_pages_per_qpl;
+};
+
+static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8);
+
struct gve_device_option_jumbo_frames {
__be32 supported_features_mask;
__be16 max_mtu;
@@ -130,6 +138,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_GQI_RDA = 0x2,
GVE_DEV_OPT_ID_GQI_QPL = 0x3,
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
};
@@ -139,6 +148,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
};
enum gve_sup_feature_mask {
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index f4ae9e19b844..c2874cdcf40c 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -105,10 +105,10 @@ union gve_rx_data_slot {
__be64 addr;
};
-/* GVE Recive Packet Descriptor Seq No */
+/* GVE Receive Packet Descriptor Seq No */
#define GVE_SEQNO(x) (be16_to_cpu(x) & 0x7)
-/* GVE Recive Packet Descriptor Flags */
+/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x)))
#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e6f1711d9be0..5704b5f57cd0 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -31,7 +31,6 @@
// Minimum amount of time between queue kicks in msec (10 seconds)
#define MIN_TX_TIMEOUT_GAP (1000 * 10)
-#define DQO_TX_MAX 0x3FFFF
char gve_driver_name[] = "gve";
const char gve_version_str[] = GVE_VERSION;
@@ -494,7 +493,7 @@ static int gve_setup_device_resources(struct gve_priv *priv)
goto abort_with_stats_report;
}
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (!gve_is_gqi(priv)) {
priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
GFP_KERNEL);
if (!priv->ptype_lut_dqo) {
@@ -1083,11 +1082,12 @@ free_qpls:
static int gve_alloc_qpls(struct gve_priv *priv)
{
int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int page_count;
int start_id;
int i, j;
int err;
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ if (!gve_is_qpl(priv))
return 0;
priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
@@ -1095,17 +1095,25 @@ static int gve_alloc_qpls(struct gve_priv *priv)
return -ENOMEM;
start_id = gve_tx_start_qpl_id(priv);
+ page_count = priv->tx_pages_per_qpl;
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
- priv->tx_pages_per_qpl);
+ page_count);
if (err)
goto free_qpls;
}
start_id = gve_rx_start_qpl_id(priv);
+
+	/* For GQI_QPL, the number of pages allocated has a 1:1 relationship
+	 * with the number of descriptors. For DQO, more pages are required
+	 * than descriptors (because of out-of-order completions).
+ */
+ page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ?
+ priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
- priv->rx_data_slot_cnt);
+ page_count);
if (err)
goto free_qpls;
}
@@ -2051,7 +2059,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
/* Big TCP is only supported on DQ*/
if (!gve_is_gqi(priv))
- netif_set_tso_max_size(priv->dev, DQO_TX_MAX);
+ netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
priv->num_registered_pages = 0;
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index e57b73eb70f6..ea0e38b4d9e9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -22,11 +22,13 @@ static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
}
static void gve_free_page_dqo(struct gve_priv *priv,
- struct gve_rx_buf_state_dqo *bs)
+ struct gve_rx_buf_state_dqo *bs,
+ bool free_page)
{
page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
- gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
- DMA_FROM_DEVICE);
+ if (free_page)
+ gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
+ DMA_FROM_DEVICE);
bs->page_info.page = NULL;
}
@@ -130,12 +132,20 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx)
*/
for (i = 0; i < 5; i++) {
buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
- if (gve_buf_ref_cnt(buf_state) == 0)
+ if (gve_buf_ref_cnt(buf_state) == 0) {
+ rx->dqo.used_buf_states_cnt--;
return buf_state;
+ }
gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
}
+ /* For QPL, we cannot allocate any new buffers and must
+ * wait for the existing ones to be available.
+ */
+ if (rx->dqo.qpl)
+ return NULL;
+
/* If there are no free buf states discard an entry from
* `used_buf_states` so it can be used.
*/
@@ -144,23 +154,39 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx)
if (gve_buf_ref_cnt(buf_state) == 0)
return buf_state;
- gve_free_page_dqo(rx->gve, buf_state);
+ gve_free_page_dqo(rx->gve, buf_state, true);
gve_free_buf_state(rx, buf_state);
}
return NULL;
}
-static int gve_alloc_page_dqo(struct gve_priv *priv,
+static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- int err;
+ struct gve_priv *priv = rx->gve;
+ u32 idx;
- err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
- if (err)
- return err;
+ if (!rx->dqo.qpl) {
+ int err;
+ err = gve_alloc_page(priv, &priv->pdev->dev,
+ &buf_state->page_info.page,
+ &buf_state->addr,
+ DMA_FROM_DEVICE, GFP_ATOMIC);
+ if (err)
+ return err;
+ } else {
+ idx = rx->dqo.next_qpl_page_idx;
+ if (idx >= priv->rx_pages_per_qpl) {
+ net_err_ratelimited("%s: Out of QPL pages\n",
+ priv->dev->name);
+ return -ENOMEM;
+ }
+ buf_state->page_info.page = rx->dqo.qpl->pages[idx];
+ buf_state->addr = rx->dqo.qpl->page_buses[idx];
+ rx->dqo.next_qpl_page_idx++;
+ }
buf_state->page_info.page_offset = 0;
buf_state->page_info.page_address =
page_address(buf_state->page_info.page);
@@ -195,9 +221,13 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
for (i = 0; i < rx->dqo.num_buf_states; i++) {
struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
-
+ /* Only free page for RDA. QPL pages are freed in gve_main. */
if (bs->page_info.page)
- gve_free_page_dqo(priv, bs);
+ gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+ }
+ if (rx->dqo.qpl) {
+ gve_unassign_qpl(priv, rx->dqo.qpl->id);
+ rx->dqo.qpl = NULL;
}
if (rx->dqo.bufq.desc_ring) {
@@ -229,7 +259,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
int i;
const u32 buffer_queue_slots =
- priv->options_dqo_rda.rx_buff_ring_entries;
+ priv->queue_format == GVE_DQO_RDA_FORMAT ?
+ priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt;
const u32 completion_queue_slots = priv->rx_desc_cnt;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -243,7 +274,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
- rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
+ rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ?
+ min_t(s16, S16_MAX, buffer_queue_slots * 4) :
+ priv->rx_pages_per_qpl;
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
GFP_KERNEL);
@@ -275,6 +308,13 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.bufq.desc_ring)
goto err;
+ if (priv->queue_format != GVE_DQO_RDA_FORMAT) {
+ rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num);
+ if (!rx->dqo.qpl)
+ goto err;
+ rx->dqo.next_qpl_page_idx = 0;
+ }
+
rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
&rx->q_resources_bus, GFP_KERNEL);
if (!rx->q_resources)
@@ -352,7 +392,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
if (unlikely(!buf_state))
break;
- if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
+ if (unlikely(gve_alloc_page_dqo(rx, buf_state))) {
u64_stats_update_begin(&rx->statss);
rx->rx_buf_alloc_fail++;
u64_stats_update_end(&rx->statss);
@@ -415,6 +455,7 @@ static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
mark_used:
gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+ rx->dqo.used_buf_states_cnt++;
}
static void gve_rx_skb_csum(struct sk_buff *skb,
@@ -475,6 +516,43 @@ static void gve_rx_free_skb(struct gve_rx_ring *rx)
rx->ctx.skb_tail = NULL;
}
+static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx)
+{
+ if (!rx->dqo.qpl)
+ return false;
+ if (rx->dqo.used_buf_states_cnt <
+ (rx->dqo.num_buf_states -
+ GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD))
+ return false;
+ return true;
+}
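With the defaults added in this series (rx_pages_per_qpl = 2048, hence num_buf_states = 2048
for DQO-QPL, and a threshold of 96), the check above starts forcing copies once
used_buf_states_cnt reaches 2048 - 96 = 1952; a worked example, not additional driver code:

/*
 * used_buf_states_cnt <  (2048 - 96) = 1952  ->  keep attaching QPL pages
 * used_buf_states_cnt >= 1952                ->  gve_rx_copy_ondemand()
 */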
+
+static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ u16 buf_len)
+{
+ struct page *page = alloc_page(GFP_ATOMIC);
+ int num_frags;
+
+ if (!page)
+ return -ENOMEM;
+
+ memcpy(page_address(page),
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_len);
+ num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page,
+ 0, buf_len, PAGE_SIZE);
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_alloc_cnt++;
+ u64_stats_update_end(&rx->statss);
+ /* Return unused buffer. */
+ gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+ return 0;
+}
+
/* Chains multi skbs for single rx packet.
* Returns 0 if buffer is appended, -1 otherwise.
*/
@@ -502,12 +580,20 @@ static int gve_rx_append_frags(struct napi_struct *napi,
rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
}
+ /* Trigger ondemand page allocation if we are running low on buffers */
+ if (gve_rx_should_trigger_copy_ondemand(rx))
+ return gve_rx_copy_ondemand(rx, buf_state, buf_len);
+
skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
buf_state->page_info.page,
buf_state->page_info.page_offset,
buf_len, priv->data_buffer_size_dqo);
gve_dec_pagecnt_bias(&buf_state->page_info);
+ /* Advances buffer page-offset if page is partially used.
+ * Marks buffer as used if page is full.
+ */
+ gve_try_recycle_buf(priv, rx, buf_state);
return 0;
}
@@ -561,8 +647,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
priv)) != 0) {
goto error;
}
-
- gve_try_recycle_buf(priv, rx, buf_state);
return 0;
}
@@ -588,6 +672,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
+ if (gve_rx_should_trigger_copy_ondemand(rx)) {
+ if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0)
+ goto error;
+ return 0;
+ }
+
skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
buf_state->page_info.page_offset, buf_len,
priv->data_buffer_size_dqo);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 3c09e66ba1ab..1e19b834a613 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -13,6 +13,89 @@
#include <linux/slab.h>
#include <linux/skbuff.h>
+/* Returns true if tx_bufs are available. */
+static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
+{
+ int num_avail;
+
+ if (!tx->dqo.qpl)
+ return true;
+
+ num_avail = tx->dqo.num_tx_qpl_bufs -
+ (tx->dqo_tx.alloc_tx_qpl_buf_cnt -
+ tx->dqo_tx.free_tx_qpl_buf_cnt);
+
+ if (count <= num_avail)
+ return true;
+
+ /* Update cached value from dqo_compl. */
+ tx->dqo_tx.free_tx_qpl_buf_cnt =
+ atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt);
+
+ num_avail = tx->dqo.num_tx_qpl_bufs -
+ (tx->dqo_tx.alloc_tx_qpl_buf_cnt -
+ tx->dqo_tx.free_tx_qpl_buf_cnt);
+
+ return count <= num_avail;
+}
+
+static s16
+gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx)
+{
+ s16 index;
+
+ index = tx->dqo_tx.free_tx_qpl_buf_head;
+
+ /* No TX buffers available, try to steal the list from the
+ * completion handler.
+ */
+ if (unlikely(index == -1)) {
+ tx->dqo_tx.free_tx_qpl_buf_head =
+ atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
+ index = tx->dqo_tx.free_tx_qpl_buf_head;
+
+ if (unlikely(index == -1))
+ return index;
+ }
+
+ /* Remove TX buf from free list */
+ tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index];
+
+ return index;
+}
+
+static void
+gve_free_tx_qpl_bufs(struct gve_tx_ring *tx,
+ struct gve_tx_pending_packet_dqo *pkt)
+{
+ s16 index;
+ int i;
+
+ if (!pkt->num_bufs)
+ return;
+
+ index = pkt->tx_qpl_buf_ids[0];
+ /* Create a linked list of buffers to be added to the free list */
+ for (i = 1; i < pkt->num_bufs; i++) {
+ tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i];
+ index = pkt->tx_qpl_buf_ids[i];
+ }
+
+ while (true) {
+ s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head);
+
+ tx->dqo.tx_qpl_buf_next[index] = old_head;
+ if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head,
+ old_head,
+ pkt->tx_qpl_buf_ids[0]) == old_head) {
+ break;
+ }
+ }
+
+ atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt);
+ pkt->num_bufs = 0;
+}
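The two functions above implement a small lock-free scheme: the TX path owns a plain free-list
head, the completion path pushes freed buffer indices onto an atomic head with a cmpxchg loop,
and the TX path steals that whole list with atomic_xchg() when its own list is empty. A
stripped-down sketch of the consumer side, using the kernel's atomic_t primitives (names are
placeholders, illustrative only):

static s16 example_pop_buf(s16 *next, s16 *consumer_head, atomic_t *producer_head)
{
	s16 idx = *consumer_head;

	if (unlikely(idx == -1)) {
		/* Steal everything the producer has freed so far */
		*consumer_head = atomic_xchg(producer_head, -1);
		idx = *consumer_head;
		if (idx == -1)
			return -1;	/* nothing available */
	}

	*consumer_head = next[idx];	/* unlink the first entry */
	return idx;
}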
+
/* Returns true if a gve_tx_pending_packet_dqo object is available. */
static bool gve_has_pending_packet(struct gve_tx_ring *tx)
{
@@ -136,9 +219,40 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
kvfree(tx->dqo.pending_packets);
tx->dqo.pending_packets = NULL;
+ kvfree(tx->dqo.tx_qpl_buf_next);
+ tx->dqo.tx_qpl_buf_next = NULL;
+
+ if (tx->dqo.qpl) {
+ gve_unassign_qpl(priv, tx->dqo.qpl->id);
+ tx->dqo.qpl = NULL;
+ }
+
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
+static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
+{
+ int num_tx_qpl_bufs = GVE_TX_BUFS_PER_PAGE_DQO *
+ tx->dqo.qpl->num_entries;
+ int i;
+
+ tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs,
+ sizeof(tx->dqo.tx_qpl_buf_next[0]),
+ GFP_KERNEL);
+ if (!tx->dqo.tx_qpl_buf_next)
+ return -ENOMEM;
+
+ tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs;
+
+ /* Generate free TX buf list */
+ for (i = 0; i < num_tx_qpl_bufs - 1; i++)
+ tx->dqo.tx_qpl_buf_next[i] = i + 1;
+ tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1;
+
+ atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1);
+ return 0;
+}
+
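For illustration (not driver code), with num_tx_qpl_bufs = 4 the loop above produces:

	tx_qpl_buf_next[] = { 1, 2, 3, -1 };

i.e. an array-encoded singly linked free list: starting at index 0 the walk visits
0 -> 1 -> 2 -> 3 and stops at -1, the "empty" marker used throughout this file.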
static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
{
struct gve_tx_ring *tx = &priv->tx[idx];
@@ -155,7 +269,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
/* Queue sizes must be a power of 2 */
tx->mask = priv->tx_desc_cnt - 1;
- tx->dqo.complq_mask = priv->options_dqo_rda.tx_comp_ring_entries - 1;
+ tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
+ priv->options_dqo_rda.tx_comp_ring_entries - 1 :
+ tx->mask;
/* The max number of pending packets determines the maximum number of
* descriptors which maybe written to the completion queue.
@@ -211,6 +327,15 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!tx->q_resources)
goto err;
+ if (gve_is_qpl(priv)) {
+ tx->dqo.qpl = gve_assign_tx_qpl(priv, idx);
+ if (!tx->dqo.qpl)
+ goto err;
+
+ if (gve_tx_qpl_buf_init(tx))
+ goto err;
+ }
+
gve_tx_add_to_block(priv, idx);
return 0;
@@ -267,20 +392,27 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
return tx->mask - num_used;
}
+static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
+ int desc_count, int buf_count)
+{
+ return gve_has_pending_packet(tx) &&
+ num_avail_tx_slots(tx) >= desc_count &&
+ gve_has_free_tx_qpl_bufs(tx, buf_count);
+}
+
/* Stops the queue if available descriptors is less than 'count'.
* Return: 0 if stop is not required.
*/
-static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count)
+static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
+ int desc_count, int buf_count)
{
- if (likely(gve_has_pending_packet(tx) &&
- num_avail_tx_slots(tx) >= count))
+ if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return 0;
/* Update cached TX head pointer */
tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
- if (likely(gve_has_pending_packet(tx) &&
- num_avail_tx_slots(tx) >= count))
+ if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return 0;
/* No space, so stop the queue */
@@ -295,8 +427,7 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, int count)
*/
tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
- if (likely(!gve_has_pending_packet(tx) ||
- num_avail_tx_slots(tx) < count))
+ if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return -EBUSY;
netif_tx_start_queue(tx->netdev_txq);
@@ -444,44 +575,16 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
};
}
-/* Returns 0 on success, or < 0 on error.
- *
- * Before this function is called, the caller must ensure
- * gve_has_pending_packet(tx) returns true.
- */
static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct gve_tx_pending_packet_dqo *pkt,
+ s16 completion_tag,
+ u32 *desc_idx,
+ bool is_gso)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
- const bool is_gso = skb_is_gso(skb);
- u32 desc_idx = tx->dqo_tx.tail;
-
- struct gve_tx_pending_packet_dqo *pkt;
- struct gve_tx_metadata_dqo metadata;
- s16 completion_tag;
int i;
- pkt = gve_alloc_pending_packet(tx);
- pkt->skb = skb;
- pkt->num_bufs = 0;
- completion_tag = pkt - tx->dqo.pending_packets;
-
- gve_extract_tx_metadata_dqo(skb, &metadata);
- if (is_gso) {
- int header_len = gve_prep_tso(skb);
-
- if (unlikely(header_len < 0))
- goto err;
-
- gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
- skb, &metadata, header_len);
- desc_idx = (desc_idx + 1) & tx->mask;
- }
-
- gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
- &metadata);
- desc_idx = (desc_idx + 1) & tx->mask;
-
/* Note: HW requires that the size of a non-TSO packet be within the
* range of [17, 9728].
*
@@ -490,6 +593,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
* - Hypervisor won't allow MTU larger than 9216.
*/
+ pkt->num_bufs = 0;
/* Map the linear portion of skb */
{
u32 len = skb_headlen(skb);
@@ -503,7 +607,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
completion_tag,
/*eop=*/shinfo->nr_frags == 0, is_gso);
}
@@ -522,10 +626,139 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
completion_tag, is_eop, is_gso);
}
+ return 0;
+err:
+ for (i = 0; i < pkt->num_bufs; i++) {
+ if (i == 0) {
+ dma_unmap_single(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE);
+ }
+ }
+ pkt->num_bufs = 0;
+ return -1;
+}
+
+/* Tx buffer i corresponds to
+ * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO
+ * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO
+ */
+static void gve_tx_buf_get_addr(struct gve_tx_ring *tx,
+ s16 index,
+ void **va, dma_addr_t *dma_addr)
+{
+ int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
+ int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO;
+
+ *va = page_address(tx->dqo.qpl->pages[page_id]) + offset;
+ *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset;
+}
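A worked instance of the mapping described in the comment above, assuming 4 KiB pages
(so GVE_TX_BUFS_PER_PAGE_DQO = 2); illustrative arithmetic only:

/*
 * index 5:
 *   page_id = 5 >> (12 - 11)      = 2    (same as 5 / 2)
 *   offset  = (5 & (2 - 1)) << 11 = 2048 (same as (5 % 2) * 2048)
 * so buffer 5 is the second 2K half of QPL page 2.
 */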
+
+static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
+ struct sk_buff *skb,
+ struct gve_tx_pending_packet_dqo *pkt,
+ s16 completion_tag,
+ u32 *desc_idx,
+ bool is_gso)
+{
+ u32 copy_offset = 0;
+ dma_addr_t dma_addr;
+ u32 copy_len;
+ s16 index;
+ void *va;
+
+ /* Break the packet into buffer size chunks */
+ pkt->num_bufs = 0;
+ while (copy_offset < skb->len) {
+ index = gve_alloc_tx_qpl_buf(tx);
+ if (unlikely(index == -1))
+ goto err;
+
+ gve_tx_buf_get_addr(tx, index, &va, &dma_addr);
+ copy_len = min_t(u32, GVE_TX_BUF_SIZE_DQO,
+ skb->len - copy_offset);
+ skb_copy_bits(skb, copy_offset, va, copy_len);
+
+ copy_offset += copy_len;
+ dma_sync_single_for_device(tx->dev, dma_addr,
+ copy_len, DMA_TO_DEVICE);
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
+ copy_len,
+ dma_addr,
+ completion_tag,
+ copy_offset == skb->len,
+ is_gso);
+
+ pkt->tx_qpl_buf_ids[pkt->num_bufs] = index;
+ ++tx->dqo_tx.alloc_tx_qpl_buf_cnt;
+ ++pkt->num_bufs;
+ }
+
+ return 0;
+err:
+ /* Should not be here if gve_has_free_tx_qpl_bufs() check is correct */
+ gve_free_tx_qpl_bufs(tx, pkt);
+ return -ENOMEM;
+}
+
+/* Returns 0 on success, or < 0 on error.
+ *
+ * Before this function is called, the caller must ensure
+ * gve_has_pending_packet(tx) returns true.
+ */
+static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
+ struct sk_buff *skb)
+{
+ const bool is_gso = skb_is_gso(skb);
+ u32 desc_idx = tx->dqo_tx.tail;
+ struct gve_tx_pending_packet_dqo *pkt;
+ struct gve_tx_metadata_dqo metadata;
+ s16 completion_tag;
+
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->skb = skb;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ gve_extract_tx_metadata_dqo(skb, &metadata);
+ if (is_gso) {
+ int header_len = gve_prep_tso(skb);
+
+ if (unlikely(header_len < 0))
+ goto err;
+
+ gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx,
+ skb, &metadata, header_len);
+ desc_idx = (desc_idx + 1) & tx->mask;
+ }
+
+ gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx,
+ &metadata);
+ desc_idx = (desc_idx + 1) & tx->mask;
+
+ if (tx->dqo.qpl) {
+ if (gve_tx_add_skb_copy_dqo(tx, skb, pkt,
+ completion_tag,
+ &desc_idx, is_gso))
+ goto err;
+ } else {
+ if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt,
+ completion_tag,
+ &desc_idx, is_gso))
+ goto err;
+ }
+
+ tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
+
/* Commit the changes to our state */
tx->dqo_tx.tail = desc_idx;
@@ -547,22 +780,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
return 0;
err:
- for (i = 0; i < pkt->num_bufs; i++) {
- if (i == 0) {
- dma_unmap_single(tx->dev,
- dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]),
- DMA_TO_DEVICE);
- } else {
- dma_unmap_page(tx->dev,
- dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]),
- DMA_TO_DEVICE);
- }
- }
-
pkt->skb = NULL;
- pkt->num_bufs = 0;
gve_free_pending_packet(tx, pkt);
return -1;
@@ -636,40 +854,56 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
int num_buffer_descs;
int total_num_descs;
- if (skb_is_gso(skb)) {
- /* If TSO doesn't meet HW requirements, attempt to linearize the
- * packet.
- */
- if (unlikely(!gve_can_send_tso(skb) &&
- skb_linearize(skb) < 0)) {
- net_err_ratelimited("%s: Failed to transmit TSO packet\n",
- priv->dev->name);
- goto drop;
- }
-
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
+ if (tx->dqo.qpl) {
+ if (skb_is_gso(skb))
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto drop;
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
+ /* We do not need to verify the number of buffers used per
+	 * packet or per segment in case of TSO, as with 2K-sized buffers
+ * none of the TX packet rules would be violated.
+ *
+ * gve_can_send_tso() checks that each TCP segment of gso_size is
+	 * not distributed over more than 9 SKB frags.
+ */
+ num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
} else {
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
+ if (skb_is_gso(skb)) {
+ /* If TSO doesn't meet HW requirements, attempt to linearize the
+ * packet.
+ */
+ if (unlikely(!gve_can_send_tso(skb) &&
+ skb_linearize(skb) < 0)) {
+ net_err_ratelimited("%s: Failed to transmit TSO packet\n",
+ priv->dev->name);
+ goto drop;
+ }
- if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
- if (unlikely(skb_linearize(skb) < 0))
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
goto drop;
- num_buffer_descs = 1;
+ num_buffer_descs = gve_num_buffer_descs_needed(skb);
+ } else {
+ num_buffer_descs = gve_num_buffer_descs_needed(skb);
+
+ if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
+ if (unlikely(skb_linearize(skb) < 0))
+ goto drop;
+
+ num_buffer_descs = 1;
+ }
}
}
/* Metadata + (optional TSO) + data descriptors. */
total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
- GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP))) {
+ GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
+ num_buffer_descs))) {
return -1;
}
- if (unlikely(gve_tx_add_skb_no_copy_dqo(tx, skb) < 0))
+ if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0))
goto drop;
netdev_tx_sent_queue(tx->netdev_txq, skb->len);
@@ -817,7 +1051,11 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
return;
}
}
- gve_unmap_packet(tx->dev, pending_packet);
+ tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, pending_packet);
+ else
+ gve_unmap_packet(tx->dev, pending_packet);
*bytes += pending_packet->skb->len;
(*pkts)++;
@@ -875,12 +1113,16 @@ static void remove_miss_completions(struct gve_priv *priv,
remove_from_list(tx, &tx->dqo_compl.miss_completions,
pending_packet);
- /* Unmap buffers and free skb but do not unallocate packet i.e.
+	/* Unmap/free TX buffers and free the skb, but do not release the packet, i.e.
* the completion tag is not freed to ensure that the driver
* can take appropriate action if a corresponding valid
* completion is received later.
*/
- gve_unmap_packet(tx->dev, pending_packet);
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, pending_packet);
+ else
+ gve_unmap_packet(tx->dev, pending_packet);
+
/* This indicates the packet was dropped. */
dev_kfree_skb_any(pending_packet->skb);
pending_packet->skb = NULL;
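
The QPL copy path above addresses TX buffers purely by index; gve_tx_buf_get_addr() turns that index into a page and an offset with shift/mask arithmetic. A minimal standalone sketch of that mapping follows, assuming 4 KiB pages and the 2 KiB GVE_TX_BUF_SIZE_DQO implied by the comment in gve_try_tx_skb(); the real constants come from the gve headers, not from this patch.

#include <stdio.h>

/* Assumed values for illustration only: 4 KiB pages, 2 KiB TX buffers. */
#define PAGE_SHIFT			12
#define GVE_TX_BUF_SHIFT_DQO		11
#define GVE_TX_BUF_SIZE_DQO		(1 << GVE_TX_BUF_SHIFT_DQO)
#define GVE_TX_BUFS_PER_PAGE_DQO	(1 << (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO))

/* Same arithmetic as gve_tx_buf_get_addr(): the high bits of the buffer
 * index select the QPL page, the low bits select the slot in that page.
 */
static void buf_index_to_page(int index, int *page_id, int *offset)
{
	*page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
	*offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) << GVE_TX_BUF_SHIFT_DQO;
}

int main(void)
{
	int page_id, offset, i;

	for (i = 0; i < 4; i++) {
		buf_index_to_page(i, &page_id, &offset);
		printf("buf %d -> page %d, offset %d\n", i, page_id, offset);
	}
	return 0;
}

With two 2 KiB buffers per 4 KiB page, buffer 3 maps to page 1 at offset 2048, matching the qpl_page_id/qpl_page_offset formula in the comment above gve_tx_buf_get_addr().
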
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 50c3f5d6611f..ecf92a5d56bb 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -960,8 +960,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto init_fail;
}
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index ce2571c16e43..cb7b0293fe85 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -862,8 +862,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
goto out_disconnect_phy;
ndev->irq = platform_get_irq(pdev, 0);
- if (ndev->irq <= 0) {
- ret = -ENODEV;
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
goto out_disconnect_phy;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index f867e9531117..26d22bb04b87 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1206,9 +1206,8 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
}
ndev->irq = platform_get_irq(pdev, 0);
- if (ndev->irq <= 0) {
- netdev_err(ndev, "No irq resource\n");
- ret = -EINVAL;
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
goto out_phy_node;
}
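
The hip04, hisi_femac and hix5hd2 hunks above all make the same correction: platform_get_irq() returns a negative errno on failure (and, in current kernels, does not return 0), so the probe path should test for a negative value and propagate that code rather than overwriting it with -EINVAL or -ENODEV and losing, for example, -EPROBE_DEFER. A minimal sketch of the idiom, with a hypothetical foo_probe() used only for illustration:

#include <linux/platform_device.h>

/* Hypothetical probe fragment showing the error-handling idiom above. */
static int foo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagate the errno, e.g. -EPROBE_DEFER */

	/* ... request the IRQ and finish probing here ... */
	return 0;
}
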
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index e3bb05959ba9..edf0bcf76ac9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -422,7 +422,6 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
struct mac_params *mac_param);
int hns_mac_init(struct dsaf_device *dsaf_dev);
-void mac_adjust_link(struct net_device *net_dev);
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index a7eb87da4e70..a08d1f0a5a16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -9,9 +9,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include "hns_dsaf_ppe.h"
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index e2ff3ca198d1..93344563a259 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -11,10 +11,6 @@
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include "hns_dsaf_main.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 6efea4662858..e214bfaece1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -17,11 +17,11 @@ hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
+hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 514a20bce4f4..a4b43bcd2f0c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -382,6 +382,7 @@ struct hnae3_dev_specs {
u16 umv_size;
u16 mc_mac_size;
u32 mac_stats_num;
+ u8 tnl_num;
};
struct hnae3_client_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b7b51e56b030..eac2d0573241 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -18,6 +18,7 @@
#include <net/gre.h>
#include <net/gro.h>
#include <net/ip6_checksum.h>
+#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tcp.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 88af34bbee34..acd756b0c7c9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -6,7 +6,7 @@
#include <linux/dim.h>
#include <linux/if_vlan.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
#include <asm/barrier.h>
#include "hnae3.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 407d30ee55d2..36858a72d771 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -569,8 +569,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
{
- struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
int i, j;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 91c173f40701..4d15eb73b972 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -826,7 +826,9 @@ struct hclge_dev_specs_1_cmd {
u8 rsv0[2];
__le16 umv_size;
__le16 mc_mac_size;
- u8 rsv1[12];
+ u8 rsv1[6];
+ u8 tnl_num;
+ u8 rsv2[5];
};
/* mac speed type defined in firmware command */
@@ -886,8 +888,4 @@ struct hclge_query_wol_supported_cmd {
struct hclge_hw;
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
-enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
- struct hclge_desc *desc);
-enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
- struct hclge_desc *desc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 0fb2eaee3e8a..f01a7a9ee02c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -7,6 +7,7 @@
#include "hclge_debugfs.h"
#include "hclge_err.h"
#include "hclge_main.h"
+#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hnae3.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a940e35aef29..0f50dba6cc47 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -20,6 +20,7 @@
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
+#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
@@ -40,20 +41,6 @@
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500
-/* Get DFX BD number offset */
-#define HCLGE_DFX_BIOS_BD_OFFSET 1
-#define HCLGE_DFX_SSU_0_BD_OFFSET 2
-#define HCLGE_DFX_SSU_1_BD_OFFSET 3
-#define HCLGE_DFX_IGU_BD_OFFSET 4
-#define HCLGE_DFX_RPU_0_BD_OFFSET 5
-#define HCLGE_DFX_RPU_1_BD_OFFSET 6
-#define HCLGE_DFX_NCSI_BD_OFFSET 7
-#define HCLGE_DFX_RTC_BD_OFFSET 8
-#define HCLGE_DFX_PPP_BD_OFFSET 9
-#define HCLGE_DFX_RCB_BD_OFFSET 10
-#define HCLGE_DFX_TQP_BD_OFFSET 11
-#define HCLGE_DFX_SSU_2_BD_OFFSET 12
-
#define HCLGE_LINK_STATUS_MS 10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
@@ -94,62 +81,6 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CSQ_DEPTH_REG,
- HCLGE_COMM_NIC_CSQ_TAIL_REG,
- HCLGE_COMM_NIC_CSQ_HEAD_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CRQ_DEPTH_REG,
- HCLGE_COMM_NIC_CRQ_TAIL_REG,
- HCLGE_COMM_NIC_CRQ_HEAD_REG,
- HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
- HCLGE_COMM_CMDQ_INTR_STS_REG,
- HCLGE_COMM_CMDQ_INTR_EN_REG,
- HCLGE_COMM_CMDQ_INTR_GEN_REG};
-
-static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
- HCLGE_PF_OTHER_INT_REG,
- HCLGE_MISC_RESET_STS_REG,
- HCLGE_MISC_VECTOR_INT_STS,
- HCLGE_GLOBAL_RESET_REG,
- HCLGE_FUN_RST_ING,
- HCLGE_GRO_EN_REG};
-
-static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
- HCLGE_RING_RX_ADDR_H_REG,
- HCLGE_RING_RX_BD_NUM_REG,
- HCLGE_RING_RX_BD_LENGTH_REG,
- HCLGE_RING_RX_MERGE_EN_REG,
- HCLGE_RING_RX_TAIL_REG,
- HCLGE_RING_RX_HEAD_REG,
- HCLGE_RING_RX_FBD_NUM_REG,
- HCLGE_RING_RX_OFFSET_REG,
- HCLGE_RING_RX_FBD_OFFSET_REG,
- HCLGE_RING_RX_STASH_REG,
- HCLGE_RING_RX_BD_ERR_REG,
- HCLGE_RING_TX_ADDR_L_REG,
- HCLGE_RING_TX_ADDR_H_REG,
- HCLGE_RING_TX_BD_NUM_REG,
- HCLGE_RING_TX_PRIORITY_REG,
- HCLGE_RING_TX_TC_REG,
- HCLGE_RING_TX_MERGE_EN_REG,
- HCLGE_RING_TX_TAIL_REG,
- HCLGE_RING_TX_HEAD_REG,
- HCLGE_RING_TX_FBD_NUM_REG,
- HCLGE_RING_TX_OFFSET_REG,
- HCLGE_RING_TX_EBD_NUM_REG,
- HCLGE_RING_TX_EBD_OFFSET_REG,
- HCLGE_RING_TX_BD_ERR_REG,
- HCLGE_RING_EN_REG};
-
-static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
- HCLGE_TQP_INTR_GL0_REG,
- HCLGE_TQP_INTR_GL1_REG,
- HCLGE_TQP_INTR_GL2_REG,
- HCLGE_TQP_INTR_RL_REG};
-
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
"External Loopback test",
"App Loopback test",
@@ -375,36 +306,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u32 hclge_dfx_bd_offset_list[] = {
- HCLGE_DFX_BIOS_BD_OFFSET,
- HCLGE_DFX_SSU_0_BD_OFFSET,
- HCLGE_DFX_SSU_1_BD_OFFSET,
- HCLGE_DFX_IGU_BD_OFFSET,
- HCLGE_DFX_RPU_0_BD_OFFSET,
- HCLGE_DFX_RPU_1_BD_OFFSET,
- HCLGE_DFX_NCSI_BD_OFFSET,
- HCLGE_DFX_RTC_BD_OFFSET,
- HCLGE_DFX_PPP_BD_OFFSET,
- HCLGE_DFX_RCB_BD_OFFSET,
- HCLGE_DFX_TQP_BD_OFFSET,
- HCLGE_DFX_SSU_2_BD_OFFSET
-};
-
-static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
- HCLGE_OPC_DFX_BIOS_COMMON_REG,
- HCLGE_OPC_DFX_SSU_REG_0,
- HCLGE_OPC_DFX_SSU_REG_1,
- HCLGE_OPC_DFX_IGU_EGU_REG,
- HCLGE_OPC_DFX_RPU_REG_0,
- HCLGE_OPC_DFX_RPU_REG_1,
- HCLGE_OPC_DFX_NCSI_REG,
- HCLGE_OPC_DFX_RTC_REG,
- HCLGE_OPC_DFX_PPP_REG,
- HCLGE_OPC_DFX_RCB_REG,
- HCLGE_OPC_DFX_TQP_REG,
- HCLGE_OPC_DFX_SSU_REG_2
-};
-
static const struct key_info meta_data_key_info[] = {
{ PACKET_TYPE_ID, 6 },
{ IP_FRAGEMENT, 1 },
@@ -1425,6 +1326,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+ ae_dev->dev_specs.tnl_num = 0;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1448,6 +1350,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
+ ae_dev->dev_specs.tnl_num = req1->tnl_num;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -7317,14 +7220,14 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
struct flow_dissector *dissector = flow->match.dissector;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
- dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
+ dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -12383,463 +12286,6 @@ out:
return ret;
}
-static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
- u32 *regs_num_64_bit)
-{
- struct hclge_desc desc;
- u32 total_num;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query register number cmd failed, ret = %d.\n", ret);
- return ret;
- }
-
- *regs_num_32_bit = le32_to_cpu(desc.data[0]);
- *regs_num_64_bit = le32_to_cpu(desc.data[1]);
-
- total_num = *regs_num_32_bit + *regs_num_64_bit;
- if (!total_num)
- return -EINVAL;
-
- return 0;
-}
-
-static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
- void *data)
-{
-#define HCLGE_32_BIT_REG_RTN_DATANUM 8
-#define HCLGE_32_BIT_DESC_NODATA_LEN 2
-
- struct hclge_desc *desc;
- u32 *reg_val = data;
- __le32 *desc_data;
- int nodata_num;
- int cmd_num;
- int i, k, n;
- int ret;
-
- if (regs_num == 0)
- return 0;
-
- nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
- cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
- HCLGE_32_BIT_REG_RTN_DATANUM);
- desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
- ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query 32 bit register cmd failed, ret = %d.\n", ret);
- kfree(desc);
- return ret;
- }
-
- for (i = 0; i < cmd_num; i++) {
- if (i == 0) {
- desc_data = (__le32 *)(&desc[i].data[0]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
- } else {
- desc_data = (__le32 *)(&desc[i]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *reg_val++ = le32_to_cpu(*desc_data++);
-
- regs_num--;
- if (!regs_num)
- break;
- }
- }
-
- kfree(desc);
- return 0;
-}
-
-static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
- void *data)
-{
-#define HCLGE_64_BIT_REG_RTN_DATANUM 4
-#define HCLGE_64_BIT_DESC_NODATA_LEN 1
-
- struct hclge_desc *desc;
- u64 *reg_val = data;
- __le64 *desc_data;
- int nodata_len;
- int cmd_num;
- int i, k, n;
- int ret;
-
- if (regs_num == 0)
- return 0;
-
- nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
- cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
- HCLGE_64_BIT_REG_RTN_DATANUM);
- desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
- ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query 64 bit register cmd failed, ret = %d.\n", ret);
- kfree(desc);
- return ret;
- }
-
- for (i = 0; i < cmd_num; i++) {
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *reg_val++ = le64_to_cpu(*desc_data++);
-
- regs_num--;
- if (!regs_num)
- break;
- }
- }
-
- kfree(desc);
- return 0;
-}
-
-#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFDFCFBFA
-#define REG_NUM_PER_LINE 4
-#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
-#define REG_SEPARATOR_LINE 1
-#define REG_NUM_REMAIN_MASK 3
-
-int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
-{
- int i;
-
- /* initialize command BD except the last one */
- for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
- hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
- true);
- desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
- }
-
- /* initialize the last command BD */
- hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
-
- return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
-}
-
-static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
- int *bd_num_list,
- u32 type_num)
-{
- u32 entries_per_desc, desc_index, index, offset, i;
- struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
- int ret;
-
- ret = hclge_query_bd_num_cmd_send(hdev, desc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx bd num fail, status is %d.\n", ret);
- return ret;
- }
-
- entries_per_desc = ARRAY_SIZE(desc[0].data);
- for (i = 0; i < type_num; i++) {
- offset = hclge_dfx_bd_offset_list[i];
- index = offset % entries_per_desc;
- desc_index = offset / entries_per_desc;
- bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
- }
-
- return ret;
-}
-
-static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc_src, int bd_num,
- enum hclge_opcode_type cmd)
-{
- struct hclge_desc *desc = desc_src;
- int i, ret;
-
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
- desc++;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- }
-
- desc = desc_src;
- ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
- cmd, ret);
-
- return ret;
-}
-
-static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
- void *data)
-{
- int entries_per_desc, reg_num, separator_num, desc_index, index, i;
- struct hclge_desc *desc = desc_src;
- u32 *reg = data;
-
- entries_per_desc = ARRAY_SIZE(desc->data);
- reg_num = entries_per_desc * bd_num;
- separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++) {
- index = i % entries_per_desc;
- desc_index = i / entries_per_desc;
- *reg++ = le32_to_cpu(desc[desc_index].data[index]);
- }
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- return reg_num + separator_num;
-}
-
-static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
-{
- u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int data_len_per_desc, bd_num, i;
- int *bd_num_list;
- u32 data_len;
- int ret;
-
- bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
- if (!bd_num_list)
- return -ENOMEM;
-
- ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg bd num fail, status is %d.\n", ret);
- goto out;
- }
-
- data_len_per_desc = sizeof_field(struct hclge_desc, data);
- *len = 0;
- for (i = 0; i < dfx_reg_type_num; i++) {
- bd_num = bd_num_list[i];
- data_len = data_len_per_desc * bd_num;
- *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
- }
-
-out:
- kfree(bd_num_list);
- return ret;
-}
-
-static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
-{
- u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int bd_num, bd_num_max, buf_len, i;
- struct hclge_desc *desc_src;
- int *bd_num_list;
- u32 *reg = data;
- int ret;
-
- bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
- if (!bd_num_list)
- return -ENOMEM;
-
- ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg bd num fail, status is %d.\n", ret);
- goto out;
- }
-
- bd_num_max = bd_num_list[0];
- for (i = 1; i < dfx_reg_type_num; i++)
- bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
-
- buf_len = sizeof(*desc_src) * bd_num_max;
- desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < dfx_reg_type_num; i++) {
- bd_num = bd_num_list[i];
- ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
- hclge_dfx_reg_opcode_list[i]);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg fail, status is %d.\n", ret);
- break;
- }
-
- reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
- }
-
- kfree(desc_src);
-out:
- kfree(bd_num_list);
- return ret;
-}
-
-static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
- struct hnae3_knic_private_info *kinfo)
-{
-#define HCLGE_RING_REG_OFFSET 0x200
-#define HCLGE_RING_INT_REG_OFFSET 0x4
-
- int i, j, reg_num, separator_num;
- int data_num_sum;
- u32 *reg = data;
-
- /* fetching per-PF registers valus from PF PCIe register space */
- reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- data_num_sum = reg_num + separator_num;
-
- reg_num = ARRAY_SIZE(common_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- data_num_sum += reg_num + separator_num;
-
- reg_num = ARRAY_SIZE(ring_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
- data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
-
- reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- tqp_intr_reg_addr_list[i] +
- HCLGE_RING_INT_REG_OFFSET * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
- data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
-
- return data_num_sum;
-}
-
-static int hclge_get_regs_len(struct hnae3_handle *handle)
-{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
- int regs_lines_32_bit, regs_lines_64_bit;
- int ret;
-
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return ret;
- }
-
- ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg len failed, ret = %d.\n", ret);
- return ret;
- }
-
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
-
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
- regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
-}
-
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, reg_num, separator_num, ret;
- u32 *reg = data;
-
- *version = hdev->fw_version;
-
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
- }
-
- reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
-
- ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 32 bit register failed, ret = %d.\n", ret);
- return;
- }
- reg_num = regs_num_32_bit;
- reg += reg_num;
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 64 bit register failed, ret = %d.\n", ret);
- return;
- }
- reg_num = regs_num_64_bit * 2;
- reg += reg_num;
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- ret = hclge_get_dfx_reg(hdev, reg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Get dfx register failed, ret = %d.\n", ret);
-}
-
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
struct hclge_set_led_state_cmd *req;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 6a43d1515585..ec233ec57222 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1142,11 +1142,8 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state,
struct hclge_vlan_info *vlan_info);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
-int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
-void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
int hclge_push_vf_link_status(struct hclge_vport *vport);
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
new file mode 100644
index 000000000000..43c1c18fa81f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2023 Hisilicon Limited.
+
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_regs.h"
+#include "hnae3.h"
+
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+ HCLGE_PF_OTHER_INT_REG,
+ HCLGE_MISC_RESET_STS_REG,
+ HCLGE_MISC_VECTOR_INT_STS,
+ HCLGE_GLOBAL_RESET_REG,
+ HCLGE_FUN_RST_ING,
+ HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+ HCLGE_RING_RX_ADDR_H_REG,
+ HCLGE_RING_RX_BD_NUM_REG,
+ HCLGE_RING_RX_BD_LENGTH_REG,
+ HCLGE_RING_RX_MERGE_EN_REG,
+ HCLGE_RING_RX_TAIL_REG,
+ HCLGE_RING_RX_HEAD_REG,
+ HCLGE_RING_RX_FBD_NUM_REG,
+ HCLGE_RING_RX_OFFSET_REG,
+ HCLGE_RING_RX_FBD_OFFSET_REG,
+ HCLGE_RING_RX_STASH_REG,
+ HCLGE_RING_RX_BD_ERR_REG,
+ HCLGE_RING_TX_ADDR_L_REG,
+ HCLGE_RING_TX_ADDR_H_REG,
+ HCLGE_RING_TX_BD_NUM_REG,
+ HCLGE_RING_TX_PRIORITY_REG,
+ HCLGE_RING_TX_TC_REG,
+ HCLGE_RING_TX_MERGE_EN_REG,
+ HCLGE_RING_TX_TAIL_REG,
+ HCLGE_RING_TX_HEAD_REG,
+ HCLGE_RING_TX_FBD_NUM_REG,
+ HCLGE_RING_TX_OFFSET_REG,
+ HCLGE_RING_TX_EBD_NUM_REG,
+ HCLGE_RING_TX_EBD_OFFSET_REG,
+ HCLGE_RING_TX_BD_ERR_REG,
+ HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+ HCLGE_TQP_INTR_GL0_REG,
+ HCLGE_TQP_INTR_GL1_REG,
+ HCLGE_TQP_INTR_GL2_REG,
+ HCLGE_TQP_INTR_RL_REG};
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
+enum hclge_reg_tag {
+ HCLGE_REG_TAG_CMDQ = 0,
+ HCLGE_REG_TAG_COMMON,
+ HCLGE_REG_TAG_RING,
+ HCLGE_REG_TAG_TQP_INTR,
+ HCLGE_REG_TAG_QUERY_32_BIT,
+ HCLGE_REG_TAG_QUERY_64_BIT,
+ HCLGE_REG_TAG_DFX_BIOS_COMMON,
+ HCLGE_REG_TAG_DFX_SSU_0,
+ HCLGE_REG_TAG_DFX_SSU_1,
+ HCLGE_REG_TAG_DFX_IGU_EGU,
+ HCLGE_REG_TAG_DFX_RPU_0,
+ HCLGE_REG_TAG_DFX_RPU_1,
+ HCLGE_REG_TAG_DFX_NCSI,
+ HCLGE_REG_TAG_DFX_RTC,
+ HCLGE_REG_TAG_DFX_PPP,
+ HCLGE_REG_TAG_DFX_RCB,
+ HCLGE_REG_TAG_DFX_TQP,
+ HCLGE_REG_TAG_DFX_SSU_2,
+ HCLGE_REG_TAG_RPU_TNL,
+};
+
+#pragma pack(4)
+struct hclge_reg_tlv {
+ u16 tag;
+ u16 len;
+};
+
+struct hclge_reg_header {
+ u64 magic_number;
+ u8 is_vf;
+ u8 rsv[7];
+};
+
+#pragma pack()
+
+#define HCLGE_REG_TLV_SIZE sizeof(struct hclge_reg_tlv)
+#define HCLGE_REG_HEADER_SIZE sizeof(struct hclge_reg_header)
+#define HCLGE_REG_TLV_SPACE (sizeof(struct hclge_reg_tlv) / sizeof(u32))
+#define HCLGE_REG_HEADER_SPACE (sizeof(struct hclge_reg_header) / sizeof(u32))
+#define HCLGE_REG_MAGIC_NUMBER 0x686e733372656773 /* meaning is hns3regs */
+
+#define HCLGE_REG_RPU_TNL_ID_0 1
+
+static u32 hclge_reg_get_header(void *data)
+{
+ struct hclge_reg_header *header = data;
+
+ header->magic_number = HCLGE_REG_MAGIC_NUMBER;
+ header->is_vf = 0x0;
+
+ return HCLGE_REG_HEADER_SPACE;
+}
+
+static u32 hclge_reg_get_tlv(u32 tag, u32 regs_num, void *data)
+{
+ struct hclge_reg_tlv *tlv = data;
+
+ tlv->tag = tag;
+ tlv->len = regs_num * sizeof(u32) + HCLGE_REG_TLV_SIZE;
+
+ return HCLGE_REG_TLV_SPACE;
+}
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int nodata_num;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
+ HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int nodata_len;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
+ HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+{
+ int i;
+
+ /* initialize command BD except the last one */
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ /* initialize the last command BD */
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int ret;
+
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
+
+ return ret;
+}
+
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+/* tnl_id = 0 means to get the sum of all tnl registers' values */
+static int hclge_dfx_reg_rpu_tnl_cmd_send(struct hclge_dev *hdev, u32 tnl_id,
+ struct hclge_desc *desc, int bd_num)
+{
+ int i, ret;
+
+ for (i = 0; i < bd_num; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_RPU_REG_0,
+ true);
+ if (i != bd_num - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ desc[0].data[0] = cpu_to_le32(tnl_id);
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to query dfx rpu tnl reg, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
+ u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+
+ return reg_num;
+}
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int data_len_per_desc;
+ int *bd_num_list;
+ int ret;
+ u32 i;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ data_len_per_desc = sizeof_field(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++)
+ *len += bd_num_list[i] * data_len_per_desc + HCLGE_REG_TLV_SIZE;
+
+	/* The BD num of dfx_rpu_0 is reused by each dfx_rpu_tnl.
+	 *
+	 * The HCLGE_DFX_*_BD_OFFSET values start at 1, but the array
+	 * subscript starts at 0, so the offset needs '- 1'.
+	 */
+ *len += (bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1] * data_len_per_desc +
+ HCLGE_REG_TLV_SIZE) * ae_dev->dev_specs.tnl_num;
+
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
+static int hclge_get_dfx_rpu_tnl_reg(struct hclge_dev *hdev, u32 *reg,
+ struct hclge_desc *desc_src,
+ int bd_num)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret = 0;
+ u8 i;
+
+ for (i = HCLGE_REG_RPU_TNL_ID_0; i <= ae_dev->dev_specs.tnl_num; i++) {
+ ret = hclge_dfx_reg_rpu_tnl_cmd_send(hdev, i, desc_src, bd_num);
+ if (ret)
+ break;
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RPU_TNL,
+ ARRAY_SIZE(desc_src->data) * bd_num,
+ reg);
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len;
+ struct hclge_desc *desc_src;
+ int *bd_num_list;
+ u32 *reg = data;
+ int ret;
+ u32 i;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ goto free;
+ }
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_DFX_BIOS_COMMON + i,
+ ARRAY_SIZE(desc_src->data) * bd_num,
+ reg);
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+	/*
+	 * The HCLGE_DFX_*_BD_OFFSET values start at 1, but the array
+	 * subscript starts at 0, so the offset needs '- 1'.
+	 */
+ bd_num = bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1];
+ ret = hclge_get_dfx_rpu_tnl_reg(hdev, reg, desc_src, bd_num);
+
+free:
+ kfree(desc_src);
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num;
+ int data_num_sum;
+ u32 *reg = data;
+
+	/* fetching per-PF register values from PF PCIe register space */
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ data_num_sum = reg_num + HCLGE_REG_TLV_SPACE;
+
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ data_num_sum += reg_num + HCLGE_REG_TLV_SPACE;
+
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ for (j = 0; j < kinfo->num_tqps; j++) {
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGE_RING_REG_OFFSET * j);
+ }
+ data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
+
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_TQP_INTR, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ HCLGE_RING_INT_REG_OFFSET * j);
+ }
+ data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) *
+ (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int cmdq_len, common_len, ring_len, tqp_intr_len;
+ int regs_len_32_bit, regs_len_64_bit;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_len = HCLGE_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
+ common_len = HCLGE_REG_TLV_SIZE + sizeof(common_reg_addr_list);
+ ring_len = HCLGE_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
+ tqp_intr_len = HCLGE_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
+ regs_len_32_bit = HCLGE_REG_TLV_SIZE + regs_num_32_bit * sizeof(u32);
+ regs_len_64_bit = HCLGE_REG_TLV_SIZE + regs_num_64_bit * sizeof(u64);
+
+ /* return the total length of all register values */
+ return HCLGE_REG_HEADER_SIZE + cmdq_len + common_len + ring_len *
+ kinfo->num_tqps + tqp_intr_len * (hdev->num_msi_used - 1) +
+ regs_len_32_bit + regs_len_64_bit + dfx_regs_len;
+}
+
+void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+#define HCLGE_REG_64_BIT_SPACE_MULTIPLE 2
+
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ u32 *reg = data;
+ int ret;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_reg_get_header(reg);
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_32_BIT,
+ regs_num_32_bit, reg);
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg += regs_num_32_bit;
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_64_BIT,
+ regs_num_64_bit *
+ HCLGE_REG_64_BIT_SPACE_MULTIPLE, reg);
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg += regs_num_64_bit * HCLGE_REG_64_BIT_SPACE_MULTIPLE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
+}
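
The dump emitted by hclge_get_regs() is now self-describing: a 16-byte header carrying the "hns3regs" magic number and an is_vf flag, followed by a sequence of TLVs whose len field covers the 4-byte TLV itself plus the register payload behind it. A user-space style walker for that layout might look like the sketch below; the struct layouts mirror the ones added in this file, but the walker itself is illustrative only and not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#pragma pack(4)
struct reg_tlv {		/* mirrors struct hclge_reg_tlv */
	uint16_t tag;
	uint16_t len;		/* sizeof(struct reg_tlv) + payload bytes */
};

struct reg_header {		/* mirrors struct hclge_reg_header */
	uint64_t magic_number;
	uint8_t is_vf;
	uint8_t rsv[7];
};
#pragma pack()

#define REG_MAGIC_NUMBER 0x686e733372656773ULL	/* ASCII "hns3regs" */

/* Walk a register dump of 'len' bytes and print one line per TLV. */
int walk_reg_dump(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	struct reg_header hdr;
	struct reg_tlv tlv;

	if (len < sizeof(hdr))
		return -1;
	memcpy(&hdr, p, sizeof(hdr));
	if (hdr.magic_number != REG_MAGIC_NUMBER)
		return -1;
	printf("%s register dump\n", hdr.is_vf ? "VF" : "PF");

	p += sizeof(hdr);
	len -= sizeof(hdr);
	while (len >= sizeof(tlv)) {
		memcpy(&tlv, p, sizeof(tlv));
		if (tlv.len < sizeof(tlv) || tlv.len > len)
			return -1;
		printf("tag %u: %zu register words\n", (unsigned int)tlv.tag,
		       (size_t)(tlv.len - sizeof(tlv)) / sizeof(uint32_t));
		p += tlv.len;
		len -= tlv.len;
	}
	return 0;
}

The is_vf field lets a single consumer tell these PF dumps apart from the VF dumps produced by the hclgevf_regs.c counterpart added further below.
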
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
new file mode 100644
index 000000000000..b6bc1ecb8054
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2023 Hisilicon Limited.
+
+#ifndef __HCLGE_REGS_H
+#define __HCLGE_REGS_H
+#include <linux/types.h>
+#include "hclge_comm_cmd.h"
+
+struct hnae3_handle;
+struct hclge_dev;
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
+int hclge_get_regs_len(struct hnae3_handle *handle);
+void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 34f02ca8d1d2..7a2f9233d695 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -6,6 +6,7 @@
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
+#include "hclgevf_regs.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
@@ -33,58 +34,6 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CSQ_DEPTH_REG,
- HCLGE_COMM_NIC_CSQ_TAIL_REG,
- HCLGE_COMM_NIC_CSQ_HEAD_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CRQ_DEPTH_REG,
- HCLGE_COMM_NIC_CRQ_TAIL_REG,
- HCLGE_COMM_NIC_CRQ_HEAD_REG,
- HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
- HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
- HCLGE_COMM_CMDQ_INTR_EN_REG,
- HCLGE_COMM_CMDQ_INTR_GEN_REG};
-
-static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
- HCLGEVF_RST_ING,
- HCLGEVF_GRO_EN_REG};
-
-static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
- HCLGEVF_RING_RX_ADDR_H_REG,
- HCLGEVF_RING_RX_BD_NUM_REG,
- HCLGEVF_RING_RX_BD_LENGTH_REG,
- HCLGEVF_RING_RX_MERGE_EN_REG,
- HCLGEVF_RING_RX_TAIL_REG,
- HCLGEVF_RING_RX_HEAD_REG,
- HCLGEVF_RING_RX_FBD_NUM_REG,
- HCLGEVF_RING_RX_OFFSET_REG,
- HCLGEVF_RING_RX_FBD_OFFSET_REG,
- HCLGEVF_RING_RX_STASH_REG,
- HCLGEVF_RING_RX_BD_ERR_REG,
- HCLGEVF_RING_TX_ADDR_L_REG,
- HCLGEVF_RING_TX_ADDR_H_REG,
- HCLGEVF_RING_TX_BD_NUM_REG,
- HCLGEVF_RING_TX_PRIORITY_REG,
- HCLGEVF_RING_TX_TC_REG,
- HCLGEVF_RING_TX_MERGE_EN_REG,
- HCLGEVF_RING_TX_TAIL_REG,
- HCLGEVF_RING_TX_HEAD_REG,
- HCLGEVF_RING_TX_FBD_NUM_REG,
- HCLGEVF_RING_TX_OFFSET_REG,
- HCLGEVF_RING_TX_EBD_NUM_REG,
- HCLGEVF_RING_TX_EBD_OFFSET_REG,
- HCLGEVF_RING_TX_BD_ERR_REG,
- HCLGEVF_RING_EN_REG};
-
-static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
- HCLGEVF_TQP_INTR_GL0_REG,
- HCLGEVF_TQP_INTR_GL1_REG,
- HCLGEVF_TQP_INTR_GL2_REG,
- HCLGEVF_TQP_INTR_RL_REG};
-
/* hclgevf_cmd_send - send command to command queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor for describing the command
@@ -111,7 +60,7 @@ void hclgevf_arq_init(struct hclgevf_dev *hdev)
spin_unlock(&cmdq->crq.lock);
}
-static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic);
@@ -3258,72 +3207,6 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle,
*advertising = hdev->hw.mac.advertising;
}
-#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFDFCFBFA
-#define REG_NUM_PER_LINE 4
-#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
-
-static int hclgevf_get_regs_len(struct hnae3_handle *handle)
-{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
-
- return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
-}
-
-static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, j, reg_um, separator_num;
- u32 *reg = data;
-
- *version = hdev->fw_version;
-
- /* fetching per-VF registers values from VF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (j = 0; j < hdev->num_tqps; j++) {
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_TQP_REG_SIZE * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
-
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- tqp_intr_reg_addr_list[i] +
- 4 * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
-}
-
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
struct hclge_mbx_port_base_vlan *port_base_vlan)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 59ca6c794d6d..81c16b8c8da2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -294,4 +294,5 @@ void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
struct hclge_mbx_port_base_vlan *port_base_vlan);
+struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
new file mode 100644
index 000000000000..65b9dcd38137
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2023 Hisilicon Limited.
+
+#include "hclgevf_main.h"
+#include "hclgevf_regs.h"
+#include "hnae3.h"
+
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+ HCLGEVF_RST_ING,
+ HCLGEVF_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+ HCLGEVF_RING_RX_ADDR_H_REG,
+ HCLGEVF_RING_RX_BD_NUM_REG,
+ HCLGEVF_RING_RX_BD_LENGTH_REG,
+ HCLGEVF_RING_RX_MERGE_EN_REG,
+ HCLGEVF_RING_RX_TAIL_REG,
+ HCLGEVF_RING_RX_HEAD_REG,
+ HCLGEVF_RING_RX_FBD_NUM_REG,
+ HCLGEVF_RING_RX_OFFSET_REG,
+ HCLGEVF_RING_RX_FBD_OFFSET_REG,
+ HCLGEVF_RING_RX_STASH_REG,
+ HCLGEVF_RING_RX_BD_ERR_REG,
+ HCLGEVF_RING_TX_ADDR_L_REG,
+ HCLGEVF_RING_TX_ADDR_H_REG,
+ HCLGEVF_RING_TX_BD_NUM_REG,
+ HCLGEVF_RING_TX_PRIORITY_REG,
+ HCLGEVF_RING_TX_TC_REG,
+ HCLGEVF_RING_TX_MERGE_EN_REG,
+ HCLGEVF_RING_TX_TAIL_REG,
+ HCLGEVF_RING_TX_HEAD_REG,
+ HCLGEVF_RING_TX_FBD_NUM_REG,
+ HCLGEVF_RING_TX_OFFSET_REG,
+ HCLGEVF_RING_TX_EBD_NUM_REG,
+ HCLGEVF_RING_TX_EBD_OFFSET_REG,
+ HCLGEVF_RING_TX_BD_ERR_REG,
+ HCLGEVF_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+ HCLGEVF_TQP_INTR_GL0_REG,
+ HCLGEVF_TQP_INTR_GL1_REG,
+ HCLGEVF_TQP_INTR_GL2_REG,
+ HCLGEVF_TQP_INTR_RL_REG};
+
+enum hclgevf_reg_tag {
+ HCLGEVF_REG_TAG_CMDQ = 0,
+ HCLGEVF_REG_TAG_COMMON,
+ HCLGEVF_REG_TAG_RING,
+ HCLGEVF_REG_TAG_TQP_INTR,
+};
+
+#pragma pack(4)
+struct hclgevf_reg_tlv {
+ u16 tag;
+ u16 len;
+};
+
+struct hclgevf_reg_header {
+ u64 magic_number;
+ u8 is_vf;
+ u8 rsv[7];
+};
+
+#pragma pack()
+
+#define HCLGEVF_REG_TLV_SIZE sizeof(struct hclgevf_reg_tlv)
+#define HCLGEVF_REG_HEADER_SIZE sizeof(struct hclgevf_reg_header)
+#define HCLGEVF_REG_TLV_SPACE (sizeof(struct hclgevf_reg_tlv) / sizeof(u32))
+#define HCLGEVF_REG_HEADER_SPACE (sizeof(struct hclgevf_reg_header) / sizeof(u32))
+#define HCLGEVF_REG_MAGIC_NUMBER 0x686e733372656773 /* meaning is hns3regs */
+
+static u32 hclgevf_reg_get_header(void *data)
+{
+ struct hclgevf_reg_header *header = data;
+
+ header->magic_number = HCLGEVF_REG_MAGIC_NUMBER;
+ header->is_vf = 0x1;
+
+ return HCLGEVF_REG_HEADER_SPACE;
+}
+
+static u32 hclgevf_reg_get_tlv(u32 tag, u32 regs_num, void *data)
+{
+ struct hclgevf_reg_tlv *tlv = data;
+
+ tlv->tag = tag;
+ tlv->len = regs_num * sizeof(u32) + HCLGEVF_REG_TLV_SIZE;
+
+ return HCLGEVF_REG_TLV_SPACE;
+}
+
+int hclgevf_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int cmdq_len, common_len, ring_len, tqp_intr_len;
+
+ cmdq_len = HCLGEVF_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
+ common_len = HCLGEVF_REG_TLV_SIZE + sizeof(common_reg_addr_list);
+ ring_len = HCLGEVF_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
+ tqp_intr_len = HCLGEVF_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
+
+ /* return the total length of all register values */
+ return HCLGEVF_REG_HEADER_SIZE + cmdq_len + common_len +
+ tqp_intr_len * (hdev->num_msi_used - 1) +
+ ring_len * hdev->num_tqps;
+}
+
+void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+#define HCLGEVF_RING_REG_OFFSET 0x200
+#define HCLGEVF_RING_INT_REG_OFFSET 0x4
+
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, j, reg_um;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+ reg += hclgevf_reg_get_header(reg);
+
+ /* fetching per-VF register values from VF PCIe register space */
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ for (j = 0; j < hdev->num_tqps; j++) {
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGEVF_RING_REG_OFFSET * j);
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ HCLGEVF_RING_INT_REG_OFFSET * j);
+ }
+}
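
For readers of the new dump format above, here is a minimal user-space-style sketch of walking the header + TLV layout that hclgevf_get_regs() emits. The struct definitions are restated locally and parse_vf_reg_dump() is a hypothetical helper for illustration only; it is not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#pragma pack(4)
struct reg_tlv {		/* mirrors struct hclgevf_reg_tlv */
	uint16_t tag;
	uint16_t len;		/* bytes, TLV header included */
};

struct reg_header {		/* mirrors struct hclgevf_reg_header */
	uint64_t magic;
	uint8_t is_vf;
	uint8_t rsv[7];
};
#pragma pack()

/* Walk a dump buffer of "total" bytes and print one line per TLV group. */
static void parse_vf_reg_dump(const void *data, size_t total)
{
	const uint8_t *p = data;
	const uint8_t *end = p + total;
	struct reg_header hdr;
	struct reg_tlv tlv;

	if (total < sizeof(hdr))
		return;
	memcpy(&hdr, p, sizeof(hdr));
	if (hdr.magic != 0x686e733372656773ULL)	/* "hns3regs" */
		return;
	p += sizeof(hdr);

	while ((size_t)(end - p) >= sizeof(tlv)) {
		memcpy(&tlv, p, sizeof(tlv));
		if (tlv.len < sizeof(tlv) || tlv.len > (size_t)(end - p))
			break;
		printf("tag %u: %zu register values\n", (unsigned)tlv.tag,
		       (size_t)(tlv.len - sizeof(tlv)) / sizeof(uint32_t));
		p += tlv.len;		/* advance to the next TLV group */
	}
}

Each TLV payload is just the raw u32 values read in list order (cmdq, common, per-ring, per-vector), so the same walk applies to every group and keeps working as the register lists grow.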
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
new file mode 100644
index 000000000000..77bdcf60a1af
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2023 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_REGS_H
+#define __HCLGEVF_REGS_H
+#include <linux/types.h>
+
+struct hnae3_handle;
+
+int hclgevf_get_regs_len(struct hnae3_handle *handle);
+void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data);
+#endif
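
With hclgevf_get_regs_len() and hclgevf_get_regs() exported through this header, the VF driver's hnae3 ops table (kept in hclgevf_main.c, not visible in this extract) can reference them directly. A sketch of that wiring, assuming the existing ops-table style:

	static const struct hnae3_ae_ops hclgevf_ops = {
		...
		.get_regs_len = hclgevf_get_regs_len,
		.get_regs = hclgevf_get_regs,
		...
	};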
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 9232caaf0bdc..409a89d80220 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -217,7 +217,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
static int hns_mdio_write_c22(struct mii_bus *bus,
int phy_id, int regnum, u16 data)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 cmd_reg_cfg;
int ret;
@@ -259,7 +259,7 @@ static int hns_mdio_write_c22(struct mii_bus *bus,
static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
int regnum, u16 data)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 cmd_reg_cfg;
int ret;
@@ -312,7 +312,7 @@ static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
*/
static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 reg_val;
int ret;
@@ -363,7 +363,7 @@ static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
int regnum)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
u16 reg = (u16)(regnum & 0xffff);
u16 reg_val;
int ret;
@@ -424,7 +424,7 @@ static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
*/
static int hns_mdio_reset(struct mii_bus *bus)
{
- struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ struct hns_mdio_device *mdio_dev = bus->priv;
const struct hns_mdio_sc_reg *sc_reg;
int ret;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index b4aff59b3eb4..0a56e9752464 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -31,6 +31,7 @@
#include <linux/prefetch.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <net/ip.h>
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index c97095abd26a..0c314bf97480 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -38,6 +38,7 @@
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 89a1b0fea158..295516b07662 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -27,7 +27,6 @@
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index ff5487bbebe3..c3236b59e7e9 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -22,7 +22,9 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include "core.h"
#include <asm/dcr-regs.h>
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 50358cf00130..fd437f986edf 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -19,7 +19,9 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
+#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 008bbdaf1204..aae9a88d95d7 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -14,7 +14,9 @@
*
* Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
*/
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 57a25c7a9e70..6337388ee5f4 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -19,7 +19,9 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include "emac.h"
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index df76cdaddcfb..cdf5251e5679 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -194,9 +194,8 @@ static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
struct ibmvnic_sub_crq_queue **rxqs;
struct ibmvnic_sub_crq_queue **txqs;
int num_rxqs, num_txqs;
- int rc, i;
+ int i;
- rc = 0;
rxqs = adapter->rx_scrq;
txqs = adapter->tx_scrq;
num_txqs = adapter->num_active_tx_scrqs;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 4817eb13ca6f..75f3fd1d8d6e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -347,6 +347,5 @@ bool e1000_has_link(struct e1000_adapter *adapter);
void e1000_power_up_phy(struct e1000_adapter *);
void e1000_set_ethtool_ops(struct net_device *netdev);
void e1000_check_options(struct e1000_adapter *adapter);
-char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index b57a04954ccf..95cdd17134e5 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -343,7 +343,6 @@ struct e1000_host_mng_dhcp_cookie {
};
#endif
-bool e1000_check_mng_mode(struct e1000_hw *hw);
s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
@@ -352,7 +351,6 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw);
/* Filters (multicast, vlan, receive) */
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
-void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
@@ -361,7 +359,6 @@ s32 e1000_setup_led(struct e1000_hw *hw);
s32 e1000_cleanup_led(struct e1000_hw *hw);
s32 e1000_led_on(struct e1000_hw *hw);
s32 e1000_led_off(struct e1000_hw *hw);
-s32 e1000_blink_led_start(struct e1000_hw *hw);
/* Adaptive IFS Functions */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 721f86fd5802..9835e6a90d56 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -917,6 +917,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
mask |= BIT(18);
break;
default:
@@ -1585,6 +1586,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 29f9fae35f42..1fef6bb5a5fb 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -122,6 +122,8 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_PTP_I219_V26 0x57B6
#define E1000_DEV_ID_PCH_PTP_I219_LM27 0x57B7
#define E1000_DEV_ID_PCH_PTP_I219_V27 0x57B8
+#define E1000_DEV_ID_PCH_NVL_I219_LM29 0x57B9
+#define E1000_DEV_ID_PCH_NVL_I219_V29 0x57BA
#define E1000_REVISION_4 4
@@ -150,6 +152,7 @@ enum e1000_mac_type {
e1000_pch_mtp,
e1000_pch_lnp,
e1000_pch_ptp,
+ e1000_pch_nvp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 0c7fd10312c8..39e9fc601bf5 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -323,6 +323,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -470,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -717,6 +719,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1685,6 +1688,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2142,6 +2146,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3188,6 +3193,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4129,6 +4135,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 6ab261119801..563176fd436e 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -29,8 +29,6 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
s32 e1000e_setup_led_generic(struct e1000_hw *hw);
s32 e1000e_setup_link_generic(struct e1000_hw *hw);
-s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
-s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 771a3c909c45..f536c856727c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3545,6 +3545,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -4061,6 +4062,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -7021,6 +7023,8 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
struct e1000_adapter *adapter = netdev_priv(netdev);
int rc;
+ pdev->pme_poll = true;
+
rc = __e1000_resume(pdev);
if (rc)
return rc;
@@ -7682,7 +7686,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
- if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp)
+ if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -7911,6 +7915,8 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V26), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_mtp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_LM29), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_V29), board_pch_mtp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index def4566a916f..02d871bc112a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -288,6 +288,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_mtp:
case e1000_pch_lnp:
case e1000_pch_ptp:
+ case e1000_pch_nvp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
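
The i40e hunks that follow share one mechanical change: driver-private I40E_SUCCESS / I40E_ERR_* status codes are replaced with standard errno values, and i40e_status.h drops out of the include lists. Read off the hunks themselves, the mapping is roughly:

	I40E_SUCCESS                                    ->  0
	I40E_ERR_PARAM, I40E_ERR_INVALID_SIZE,
	I40E_ERR_BAD_PTR, I40E_ERR_INVALID_MAC_ADDR,
	invalid HMC/SD index, type and count errors     -> -EINVAL
	I40E_ERR_NOT_READY                              -> -EBUSY
	I40E_ERR_ADMIN_QUEUE_FULL                       -> -ENOSPC
	I40E_ERR_ADMIN_QUEUE_NO_WORK                    -> -EALREADY
	I40E_ERR_DEVICE_NOT_SUPPORTED                   -> -ENODEV
	I40E_NOT_SUPPORTED                              -> -EOPNOTSUPP
	timeout, reset, admin-queue, config, PHY and
	diagnostic failures                             -> -EIO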
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 86fac8f959bb..100eb77b8dfe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
@@ -284,7 +283,7 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.asq.bal);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ ret_code = -EIO;
return ret_code;
}
@@ -316,7 +315,7 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.arq.bal);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ ret_code = -EIO;
return ret_code;
}
@@ -340,14 +339,14 @@ static int i40e_init_asq(struct i40e_hw *hw)
if (hw->aq.asq.count > 0) {
/* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_asq_entries == 0) ||
(hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = -EIO;
goto init_adminq_exit;
}
@@ -399,14 +398,14 @@ static int i40e_init_arq(struct i40e_hw *hw)
if (hw->aq.arq.count > 0) {
/* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_adminq_exit;
}
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.arq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = -EIO;
goto init_adminq_exit;
}
@@ -452,7 +451,7 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
mutex_lock(&hw->aq.asq_mutex);
if (hw->aq.asq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_asq_out;
}
@@ -486,7 +485,7 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_arq_out;
}
@@ -594,7 +593,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
(hw->aq.num_asq_entries == 0) ||
(hw->aq.arq_buf_size == 0) ||
(hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
+ ret_code = -EIO;
goto init_adminq_exit;
}
@@ -626,13 +625,13 @@ int i40e_init_adminq(struct i40e_hw *hw)
&hw->aq.api_maj_ver,
&hw->aq.api_min_ver,
NULL);
- if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ if (ret_code != -EIO)
break;
retry++;
msleep(100);
i40e_resume_aq(hw);
} while (retry < 10);
- if (ret_code != I40E_SUCCESS)
+ if (ret_code != 0)
goto init_adminq_free_arq;
/* Some features were introduced in different FW API version
@@ -672,7 +671,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
- ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ ret_code = -EIO;
goto init_adminq_free_arq;
}
@@ -799,7 +798,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
- status = I40E_ERR_QUEUE_EMPTY;
+ status = -EIO;
goto asq_send_command_error;
}
@@ -809,7 +808,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_ADMIN_QUEUE_FULL;
+ status = -ENOSPC;
goto asq_send_command_error;
}
@@ -840,7 +839,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
- status = I40E_ERR_INVALID_SIZE;
+ status = -EINVAL;
goto asq_send_command_error;
}
@@ -848,7 +847,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
- status = I40E_ERR_PARAM;
+ status = -EINVAL;
goto asq_send_command_error;
}
@@ -863,7 +862,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
- status = I40E_ERR_ADMIN_QUEUE_FULL;
+ status = -ENOSPC;
goto asq_send_command_error;
}
@@ -940,9 +939,9 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = 0;
else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
- status = I40E_ERR_NOT_READY;
+ status = -EBUSY;
else
- status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ status = -EIO;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
@@ -960,11 +959,11 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
- status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ status = -EIO;
} else {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ status = -EIO;
}
}
@@ -1106,7 +1105,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
if (hw->aq.arq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
- ret_code = I40E_ERR_QUEUE_EMPTY;
+ ret_code = -EIO;
goto clean_arq_element_err;
}
@@ -1114,7 +1113,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ ret_code = -EALREADY;
goto clean_arq_element_out;
}
@@ -1126,7 +1125,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ ret_code = -EIO;
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index ee394aacef4d..267f2e0a21ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -5,7 +5,6 @@
#define _I40E_ADMINQ_H_
#include "i40e_osdep.h"
-#include "i40e_status.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
@@ -117,7 +116,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
};
/* aq_rc is invalid if AQ timed out */
- if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ if (aq_ret == -EIO)
return -EAGAIN;
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index ed88e38d488b..eeef20f77106 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -56,7 +56,7 @@ int i40e_set_mac_type(struct i40e_hw *hw)
break;
}
} else {
- status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ status = -ENODEV;
}
hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
@@ -660,7 +660,7 @@ int i40e_init_shared_code(struct i40e_hw *hw)
case I40E_MAC_X722:
break;
default:
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
}
hw->phy.get_link_info = true;
@@ -780,7 +780,7 @@ int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
if (flags & I40E_AQC_PORT_ADDR_VALID)
ether_addr_copy(mac_addr, addrs.port_mac);
else
- status = I40E_ERR_INVALID_MAC_ADDR;
+ status = -EINVAL;
return status;
}
@@ -858,7 +858,7 @@ int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
hw_dbg(hw, "Buffer too small for PBA data.\n");
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
for (i = 0; i < pba_size; i++) {
@@ -955,7 +955,7 @@ static int i40e_poll_globr(struct i40e_hw *hw,
hw_dbg(hw, "Global reset failed.\n");
hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
- return I40E_ERR_RESET_FAILED;
+ return -EIO;
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
@@ -995,7 +995,7 @@ int i40e_pf_reset(struct i40e_hw *hw)
}
if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
hw_dbg(hw, "Global reset polling failed to complete.\n");
- return I40E_ERR_RESET_FAILED;
+ return -EIO;
}
/* Now Wait for the FW to be ready */
@@ -1014,7 +1014,7 @@ int i40e_pf_reset(struct i40e_hw *hw)
I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
hw_dbg(hw, "wait for FW Reset complete timedout\n");
hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
- return I40E_ERR_RESET_FAILED;
+ return -EIO;
}
/* If there was a Global Reset in progress when we got here,
@@ -1040,10 +1040,10 @@ int i40e_pf_reset(struct i40e_hw *hw)
}
if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
if (i40e_poll_globr(hw, grst_del))
- return I40E_ERR_RESET_FAILED;
+ return -EIO;
} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
hw_dbg(hw, "PF reset polling failed to complete.\n");
- return I40E_ERR_RESET_FAILED;
+ return -EIO;
}
}
@@ -1318,7 +1318,7 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
int status;
if (!abilities)
- return I40E_ERR_PARAM;
+ return -EINVAL;
do {
i40e_fill_default_direct_cmd_desc(&desc,
@@ -1341,12 +1341,12 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
switch (hw->aq.asq_last_status) {
case I40E_AQ_RC_EIO:
- status = I40E_ERR_UNKNOWN_PHY;
+ status = -EIO;
break;
case I40E_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
- status = I40E_ERR_TIMEOUT;
+ status = -EIO;
break;
/* also covers I40E_AQ_RC_OK */
default:
@@ -1396,7 +1396,7 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
int status;
if (!config)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_config);
@@ -2312,7 +2312,7 @@ int i40e_aq_send_driver_version(struct i40e_hw *hw,
u16 len;
if (dv == NULL)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
@@ -2430,7 +2430,7 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
/* SEIDs need to either both be set or both be 0 for floating VEB */
if (!!uplink_seid != !!downlink_seid)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
@@ -2485,7 +2485,7 @@ int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
int status;
if (veb_seid == 0)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_veb_parameters);
@@ -2575,7 +2575,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
u16 buf_size;
if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
+ return -EINVAL;
buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
@@ -2608,7 +2608,7 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
u16 buf_size;
if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
+ return -EINVAL;
buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
@@ -2638,7 +2638,7 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
int status;
if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
+ return -EINVAL;
buf_size = count * sizeof(*mv_list);
@@ -2685,7 +2685,7 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
u16 buf_size;
if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
+ return -EINVAL;
buf_size = count * sizeof(*mv_list);
@@ -2791,7 +2791,7 @@ int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
if (count == 0 || !mr_list)
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
@@ -2827,7 +2827,7 @@ int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
* not matter.
*/
if (count == 0 || !mr_list)
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
@@ -2892,7 +2892,7 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
int status;
if (reg_val == NULL)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
@@ -3031,7 +3031,7 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
- status = I40E_ERR_PARAM;
+ status = -EINVAL;
goto i40e_aq_read_nvm_exit;
}
@@ -3076,7 +3076,7 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
- status = I40E_ERR_PARAM;
+ status = -EINVAL;
goto i40e_aq_erase_nvm_exit;
}
@@ -3368,7 +3368,7 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
- status = I40E_ERR_PARAM;
+ status = -EINVAL;
goto exit;
}
@@ -3416,7 +3416,7 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
- status = I40E_ERR_PARAM;
+ status = -EINVAL;
goto i40e_aq_update_nvm_exit;
}
@@ -3473,7 +3473,7 @@ int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
I40E_AQ_NVM_REARRANGE_TO_STRUCT);
if (!rearrange_nvm) {
- status = I40E_ERR_PARAM;
+ status = -EINVAL;
goto i40e_aq_rearrange_nvm_exit;
}
@@ -3510,7 +3510,7 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
int status;
if (buff_size == 0 || !buff)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
/* Indirect Command */
@@ -3558,7 +3558,7 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
if (buff_size == 0 || !buff)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_lldp_set_local_mib);
@@ -3627,7 +3627,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Restore LLDP not supported by current FW version.\n");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
}
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
@@ -3729,7 +3729,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
int status;
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
@@ -3760,7 +3760,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
int status;
if (buff_size == 0 || !buff)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
@@ -3848,7 +3848,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
int status;
if (seid == 0)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
@@ -3922,7 +3922,7 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
cmd_param_flag = false;
break;
default:
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
i40e_fill_default_direct_cmd_desc(&desc, opcode);
@@ -4148,7 +4148,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
break;
default:
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
switch (settings->fcoe_cntx_num) {
@@ -4160,7 +4160,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
break;
default:
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
/* Validate PE settings passed */
@@ -4178,7 +4178,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_HASH_FILTER_SIZE_1M:
break;
default:
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
switch (settings->pe_cntx_num) {
@@ -4194,7 +4194,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_DMA_CNTX_SIZE_256K:
break;
default:
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
@@ -4202,7 +4202,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
>> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
- return I40E_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -4224,7 +4224,7 @@ int i40e_set_filter_control(struct i40e_hw *hw,
u32 val;
if (!settings)
- return I40E_ERR_PARAM;
+ return -EINVAL;
/* Validate the input settings */
ret = i40e_validate_filter_settings(hw, settings);
@@ -4306,7 +4306,7 @@ int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
int status;
if (vsi_seid == 0)
- return I40E_ERR_PARAM;
+ return -EINVAL;
if (is_add) {
i40e_fill_default_direct_cmd_desc(&desc,
@@ -4381,7 +4381,7 @@ static int i40e_aq_alternate_read(struct i40e_hw *hw,
int status;
if (!reg_val0)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
cmd_resp->address0 = cpu_to_le32(reg_addr0);
@@ -4517,7 +4517,7 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
int status;
if (buff_size == 0 || !buff)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_debug_dump_internals);
@@ -4635,7 +4635,7 @@ int i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value)
{
u8 port_num = (u8)hw->func_caps.mdio_port_num;
- int status = I40E_ERR_TIMEOUT;
+ int status = -EIO;
u32 command = 0;
u16 retry = 1000;
@@ -4680,7 +4680,7 @@ int i40e_write_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 value)
{
u8 port_num = (u8)hw->func_caps.mdio_port_num;
- int status = I40E_ERR_TIMEOUT;
+ int status = -EIO;
u32 command = 0;
u16 retry = 1000;
@@ -4721,7 +4721,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value)
{
u8 port_num = hw->func_caps.mdio_port_num;
- int status = I40E_ERR_TIMEOUT;
+ int status = -EIO;
u32 command = 0;
u16 retry = 1000;
@@ -4755,7 +4755,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
(I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
- status = I40E_ERR_TIMEOUT;
+ status = -EIO;
retry = 1000;
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
@@ -4795,7 +4795,7 @@ int i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value)
{
u8 port_num = hw->func_caps.mdio_port_num;
- int status = I40E_ERR_TIMEOUT;
+ int status = -EIO;
u16 retry = 1000;
u32 command = 0;
@@ -4831,7 +4831,7 @@ int i40e_write_phy_register_clause45(struct i40e_hw *hw,
(I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
- status = I40E_ERR_TIMEOUT;
+ status = -EIO;
retry = 1000;
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
@@ -4880,7 +4880,7 @@ int i40e_write_phy_register(struct i40e_hw *hw,
phy_addr, value);
break;
default:
- status = I40E_ERR_UNKNOWN_PHY;
+ status = -EIO;
break;
}
@@ -4919,7 +4919,7 @@ int i40e_read_phy_register(struct i40e_hw *hw,
phy_addr, value);
break;
default:
- status = I40E_ERR_UNKNOWN_PHY;
+ status = -EIO;
break;
}
@@ -5109,7 +5109,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
I40E_PHY_COM_REG_PAGE, true,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
- if (status == I40E_SUCCESS)
+ if (status == 0)
*val = (u16)reg_val_aq;
return status;
}
@@ -5204,7 +5204,7 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
int status;
if (!reg_val)
- return I40E_ERR_PARAM;
+ return -EINVAL;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
@@ -5644,7 +5644,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
if (track_id == I40E_DDP_TRACKID_INVALID) {
i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
- return I40E_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
dev_cnt = profile->device_table_count;
@@ -5657,7 +5657,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
if (dev_cnt && i == dev_cnt) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
"Device doesn't support DDP\n");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
}
I40E_SECTION_TABLE(profile, sec_tbl);
@@ -5672,14 +5672,14 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
sec->section.type == SECTION_TYPE_RB_AQ) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
"Not a roll-back package\n");
- return I40E_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
} else {
if (sec->section.type == SECTION_TYPE_RB_AQ ||
sec->section.type == SECTION_TYPE_RB_MMIO) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
"Not an original package\n");
- return I40E_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
}
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 90638b67f8dc..f81e744c0fb3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -17,7 +17,7 @@ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
u32 reg;
if (!status)
- return I40E_ERR_PARAM;
+ return -EINVAL;
reg = rd32(hw, I40E_PRTDCB_GENS);
*status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
@@ -508,7 +508,7 @@ int i40e_lldp_to_dcb_config(u8 *lldpmib,
u16 type;
if (!lldpmib || !dcbcfg)
- return I40E_ERR_PARAM;
+ return -EINVAL;
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
@@ -874,7 +874,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
int ret = 0;
if (!hw->func_caps.dcb)
- return I40E_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Read LLDP NVM area */
if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
@@ -885,7 +885,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
else if (hw->mac.type == I40E_MAC_X722)
offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
else
- return I40E_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
ret = i40e_read_nvm_module_data(hw,
I40E_SR_EMP_SR_SETTINGS_PTR,
@@ -897,7 +897,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
}
if (ret)
- return I40E_ERR_NOT_READY;
+ return -EBUSY;
/* Get the LLDP AdminStatus for the current port */
adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -906,7 +906,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
/* LLDP agent disabled */
if (!adminstatus) {
hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
- return I40E_ERR_NOT_READY;
+ return -EBUSY;
}
/* Get DCBX status */
@@ -922,7 +922,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
if (ret)
return ret;
} else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- return I40E_ERR_NOT_READY;
+ return -EBUSY;
}
/* Configure the LLDP MIB change event */
@@ -949,7 +949,7 @@ i40e_get_fw_lldp_status(struct i40e_hw *hw,
int ret;
if (!lldp_status)
- return I40E_ERR_PARAM;
+ return -EINVAL;
/* Allocate buffer for the LLDPDU */
ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
@@ -1299,7 +1299,7 @@ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
sizeof(tlv->typelength) + length);
} while (tlvid < I40E_TLV_ID_END_OF_LLDPPDU);
*miblen = offset;
- return I40E_SUCCESS;
+ return 0;
}
/**
@@ -1957,7 +1957,7 @@ int i40e_read_lldp_cfg(struct i40e_hw *hw,
u32 mem;
if (!lldp_cfg)
- return I40E_ERR_PARAM;
+ return -EINVAL;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 7e8183762fd9..0e72abd178ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -220,7 +220,7 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
return false;
}
- if (size < (sizeof(struct i40e_package_header) +
+ if (size < (sizeof(struct i40e_package_header) + sizeof(u32) +
sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
netdev_err(netdev, "Invalid DDP profile - size is too small.");
return false;
@@ -281,7 +281,7 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
return -EINVAL;
- if (size < (sizeof(struct i40e_package_header) +
+ if (size < (sizeof(struct i40e_package_header) + sizeof(u32) +
sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
netdev_err(netdev, "Invalid DDP recipe size.");
return -EINVAL;
@@ -344,7 +344,7 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
if (is_add) {
status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
if (status) {
- if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+ if (status == -ENODEV) {
netdev_err(netdev,
"Profile is not supported by the device.");
return -EPERM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 62497f5565c5..1a497cb07710 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1309,7 +1309,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
if (!ret) {
dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
- } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ } else if (ret == -EIO) {
dev_info(&pf->pdev->dev,
"AQ command send failed Opcode %x AQ Error: %d\n",
desc->opcode, pf->hw.aq.asq_last_status);
@@ -1370,7 +1370,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buffer_len, NULL);
if (!ret) {
dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
- } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ } else if (ret == -EIO) {
dev_info(&pf->pdev->dev,
"AQ command send failed Opcode %x AQ Error: %d\n",
desc->opcode, pf->hw.aq.asq_last_status);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 97fe1787a8f4..b1ad7c4259b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -28,7 +28,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_DIAG,
"%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
__func__, reg, pat, val);
- return I40E_ERR_DIAG_TEST_FAILED;
+ return -EIO;
}
}
@@ -38,7 +38,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_DIAG,
"%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
__func__, reg, orig_val, val);
- return I40E_ERR_DIAG_TEST_FAILED;
+ return -EIO;
}
return 0;
@@ -127,5 +127,5 @@ int i40e_diag_eeprom_test(struct i40e_hw *hw)
BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
return i40e_validate_nvm_checksum(hw, NULL);
else
- return I40E_ERR_DIAG_TEST_FAILED;
+ return -EIO;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index afc4fa8c66af..bd1321bf7e26 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5699,8 +5699,8 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- int status = I40E_SUCCESS;
__le16 eee_capability;
+ int status = 0;
/* Deny parameters we don't support */
if (i40e_is_eee_param_supported(netdev, edata))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 46f7950a0049..96ee63aca7a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -4,7 +4,6 @@
#include "i40e.h"
#include "i40e_osdep.h"
#include "i40e_register.h"
-#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"
@@ -26,18 +25,18 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw,
enum i40e_memory_type mem_type __attribute__((unused));
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
- int ret_code = I40E_SUCCESS;
struct i40e_dma_mem mem;
+ int ret_code = 0;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
goto exit;
}
if (sd_index >= hmc_info->sd_table.sd_cnt) {
- ret_code = I40E_ERR_INVALID_SD_INDEX;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
goto exit;
}
@@ -121,7 +120,7 @@ int i40e_add_pd_table_entry(struct i40e_hw *hw,
u64 *pd_addr;
if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
- ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
goto exit;
}
@@ -200,13 +199,13 @@ int i40e_remove_pd_bp(struct i40e_hw *hw,
sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
if (sd_idx >= hmc_info->sd_table.sd_cnt) {
- ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
goto exit;
}
sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
- ret_code = I40E_ERR_INVALID_SD_TYPE;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
goto exit;
}
@@ -251,7 +250,7 @@ int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
sd_entry = &hmc_info->sd_table.sd_entry[idx];
I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
if (sd_entry->u.bp.ref_cnt) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto exit;
}
I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
@@ -276,7 +275,7 @@ int i40e_remove_sd_bp_new(struct i40e_hw *hw,
struct i40e_hmc_sd_entry *sd_entry;
if (!is_pf)
- return I40E_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -299,7 +298,7 @@ int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (sd_entry->u.pd_table.ref_cnt) {
- ret_code = I40E_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto exit;
}
@@ -325,7 +324,7 @@ int i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_sd_entry *sd_entry;
if (!is_pf)
- return I40E_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 40c101f286d1..474365bf0648 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -111,7 +111,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
/* validate values requested by driver don't exceed HMC capacity */
if (txq_num > obj->max_cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
txq_num, obj->max_cnt, ret_code);
goto init_lan_hmc_out;
@@ -134,7 +134,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
/* validate values requested by driver don't exceed HMC capacity */
if (rxq_num > obj->max_cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
rxq_num, obj->max_cnt, ret_code);
goto init_lan_hmc_out;
@@ -157,7 +157,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_cntx_num > obj->max_cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_cntx_num, obj->max_cnt, ret_code);
goto init_lan_hmc_out;
@@ -180,7 +180,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_filt_num > obj->max_cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_filt_num, obj->max_cnt, ret_code);
goto init_lan_hmc_out;
@@ -289,30 +289,30 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
u32 i, j;
if (NULL == info) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
goto exit;
}
if (NULL == info->hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
goto exit;
}
if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
goto exit;
}
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
ret_code);
goto exit;
}
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
ret_code);
goto exit;
@@ -324,8 +324,8 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
&sd_idx, &sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
- ret_code = I40E_ERR_INVALID_SD_INDEX;
- goto exit;
+ ret_code = -EINVAL;
+ goto exit;
}
/* find pd index */
I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
@@ -393,7 +393,7 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
j, sd_entry->entry_type);
break;
default:
- ret_code = I40E_ERR_INVALID_SD_TYPE;
+ ret_code = -EINVAL;
goto exit;
}
}
@@ -417,7 +417,7 @@ exit_sd_error:
i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
break;
default:
- ret_code = I40E_ERR_INVALID_SD_TYPE;
+ ret_code = -EINVAL;
break;
}
j--;
@@ -474,7 +474,7 @@ try_type_paged:
break;
default:
/* unsupported type */
- ret_code = I40E_ERR_INVALID_SD_TYPE;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
ret_code);
goto configure_lan_hmc_out;
@@ -530,34 +530,34 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
u32 i, j;
if (NULL == info) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
goto exit;
}
if (NULL == info->hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
goto exit;
}
if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
goto exit;
}
if (NULL == info->hmc_info->sd_table.sd_entry) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
goto exit;
}
if (NULL == info->hmc_info->hmc_obj) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
goto exit;
}
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
ret_code);
goto exit;
@@ -565,7 +565,7 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
- ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
ret_code);
goto exit;
@@ -599,7 +599,7 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
&sd_idx, &sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
- ret_code = I40E_ERR_INVALID_SD_INDEX;
+ ret_code = -EINVAL;
goto exit;
}
@@ -987,29 +987,29 @@ int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
int ret_code = 0;
if (NULL == hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
goto exit;
}
if (NULL == hmc_info->hmc_obj) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
goto exit;
}
if (NULL == object_base) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
goto exit;
}
if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
- ret_code = I40E_ERR_BAD_PTR;
+ ret_code = -EINVAL;
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
goto exit;
}
if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
ret_code);
- ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ ret_code = -EINVAL;
goto exit;
}
/* find sd index and limit */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a86bfa3bba74..de7fd43dc11c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3586,11 +3586,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->xsk_pool) {
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
- /* For AF_XDP ZC, we disallow packets to span on
- * multiple buffers, thus letting us skip that
- * handling in the fast-path.
- */
- chain_len = 1;
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
@@ -5715,7 +5710,7 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
int ret;
if (!vsi)
- return I40E_ERR_PARAM;
+ return -EINVAL;
pf = vsi->back;
hw = &pf->hw;
@@ -7159,7 +7154,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
*/
if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
dev_info(&pf->pdev->dev, "DCB is not supported.\n");
- err = I40E_NOT_SUPPORTED;
+ err = -EOPNOTSUPP;
goto out;
}
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
@@ -7469,7 +7464,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
non_zero_phy_type = true;
else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
- return I40E_SUCCESS;
+ return 0;
/* To force link we need to set bits for all supported PHY types,
* but there are now more than 32, so we need to split the bitmap
@@ -7520,7 +7515,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
i40e_aq_set_link_restart_an(hw, is_up, NULL);
- return I40E_SUCCESS;
+ return 0;
}
/**
@@ -8367,7 +8362,7 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
};
if (filter->flags >= ARRAY_SIZE(flag_table))
- return I40E_ERR_CONFIG;
+ return -EIO;
memset(&cld_filter, 0, sizeof(cld_filter));
@@ -8531,15 +8526,15 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
u8 field_flags = 0;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
- dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
+ dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
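/*
 * Editor's note (not part of the patch): a minimal sketch of why the
 * used_keys test above switches from BIT() to BIT_ULL() and prints with
 * 0x%llx.  The flow dissector's used_keys field is now 64 bits wide, so any
 * key whose bit index reaches 32 or more needs a 64-bit mask constant; a
 * 32-bit shift would truncate the mask (and shifting a 32-bit value by 32
 * or more is undefined behaviour).  Key indexes below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BIT_ULL(nr)	(UINT64_C(1) << (nr))

int main(void)
{
	uint64_t used_keys = DEMO_BIT_ULL(0) | DEMO_BIT_ULL(34);
	uint64_t supported = DEMO_BIT_ULL(0) | DEMO_BIT_ULL(1);

	if (used_keys & ~supported)
		printf("Unsupported key used: 0x%llx\n",
		       (unsigned long long)(used_keys & ~supported));
	return 0;
}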
@@ -8581,7 +8576,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
} else {
dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
match.mask->dst);
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
@@ -8591,7 +8586,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
} else {
dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
match.mask->src);
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
ether_addr_copy(filter->dst_mac, match.key->dst);
@@ -8609,7 +8604,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
} else {
dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
match.mask->vlan_id);
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
@@ -8633,7 +8628,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
} else {
dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
&match.mask->dst);
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
@@ -8643,13 +8638,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
} else {
dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
&match.mask->src);
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
- return I40E_ERR_CONFIG;
+ return -EIO;
}
filter->dst_ipv4 = match.key->dst;
filter->src_ipv4 = match.key->src;
@@ -8667,7 +8662,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
ipv6_addr_loopback(&match.key->src)) {
dev_err(&pf->pdev->dev,
"Bad ipv6, addr is LOOPBACK\n");
- return I40E_ERR_CONFIG;
+ return -EIO;
}
if (!ipv6_addr_any(&match.mask->dst) ||
!ipv6_addr_any(&match.mask->src))
@@ -8689,7 +8684,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
} else {
dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
be16_to_cpu(match.mask->src));
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
@@ -8699,7 +8694,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
} else {
dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
be16_to_cpu(match.mask->dst));
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
@@ -9907,11 +9902,11 @@ static void i40e_link_event(struct i40e_pf *pf)
status = i40e_get_link_status(&pf->hw, &new_link);
/* On success, disable temp link polling */
- if (status == I40E_SUCCESS) {
+ if (status == 0) {
clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
} else {
/* Enable link polling temporarily until i40e_get_link_status
- * returns I40E_SUCCESS
+ * returns 0
*/
set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
@@ -10165,7 +10160,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
do {
ret = i40e_clean_arq_element(hw, &event, &pending);
- if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ if (ret == -EALREADY)
break;
else if (ret) {
dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
@@ -12575,7 +12570,7 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
dev_info(&pf->pdev->dev,
"Commit BW only works on partition 1! This is partition %d",
pf->hw.partition_id);
- ret = I40E_NOT_SUPPORTED;
+ ret = -EOPNOTSUPP;
goto bw_commit_out;
}
@@ -12657,10 +12652,10 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
- int read_status = I40E_SUCCESS;
u16 sr_emp_sr_settings_ptr = 0;
u16 features_enable = 0;
u16 link_behavior = 0;
+ int read_status = 0;
bool ret = false;
read_status = i40e_read_nvm_word(&pf->hw,
@@ -13823,6 +13818,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY |
NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD;
} else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
* are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
@@ -15467,12 +15463,12 @@ static int i40e_pf_loop_reset(struct i40e_pf *pf)
int ret;
ret = i40e_pf_reset(hw);
- while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+ while (ret != 0 && time_before(jiffies, time_end)) {
usleep_range(10000, 20000);
ret = i40e_pf_reset(hw);
}
- if (ret == I40E_SUCCESS)
+ if (ret == 0)
pf->pfr_count++;
else
dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
@@ -15515,10 +15511,10 @@ static int i40e_handle_resets(struct i40e_pf *pf)
const int pfr = i40e_pf_loop_reset(pf);
const bool is_empr = i40e_check_fw_empr(pf);
- if (is_empr || pfr != I40E_SUCCESS)
+ if (is_empr || pfr != 0)
dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
- return is_empr ? I40E_ERR_RESET_FAILED : pfr;
+ return is_empr ? -EIO : pfr;
}
/**
@@ -15811,7 +15807,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_adminq(hw);
if (err) {
- if (err == I40E_ERR_FIRMWARE_API_VERSION)
+ if (err == -EIO)
dev_info(&pdev->dev,
"The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
hw->aq.api_maj_ver,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index f99c1f7fec40..07a46adeab38 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -37,7 +37,7 @@ int i40e_init_nvm(struct i40e_hw *hw)
nvm->blank_nvm_mode = false;
} else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
- ret_code = I40E_ERR_NVM_BLANK_MODE;
+ ret_code = -EIO;
i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
}
@@ -111,8 +111,8 @@ i40e_i40e_acquire_nvm_exit:
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
- int ret_code = I40E_SUCCESS;
u32 total_delay = 0;
+ int ret_code = 0;
if (hw->nvm.blank_nvm_mode)
return;
@@ -122,7 +122,7 @@ void i40e_release_nvm(struct i40e_hw *hw)
/* there are some rare cases when trying to release the resource
* results in an admin Q timeout, so handle them correctly
*/
- while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+ while ((ret_code == -EIO) &&
(total_delay < hw->aq.asq_cmd_timeout)) {
usleep_range(1000, 2000);
ret_code = i40e_aq_release_resource(hw,
@@ -140,7 +140,7 @@ void i40e_release_nvm(struct i40e_hw *hw)
**/
static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
- int ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = -EIO;
u32 srctl, wait_cnt;
/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
@@ -152,7 +152,7 @@ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
udelay(5);
}
- if (ret_code == I40E_ERR_TIMEOUT)
+ if (ret_code == -EIO)
i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
return ret_code;
}
@@ -168,14 +168,14 @@ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- int ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = -EIO;
u32 sr_reg;
if (offset >= hw->nvm.sr_size) {
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read error: offset %d beyond Shadow RAM limit %d\n",
offset, hw->nvm.sr_size);
- ret_code = I40E_ERR_PARAM;
+ ret_code = -EINVAL;
goto read_nvm_exit;
}
@@ -222,7 +222,7 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
bool last_command)
{
struct i40e_asq_cmd_details cmd_details;
- int ret_code = I40E_ERR_NVM;
+ int ret_code = -EIO;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -267,7 +267,7 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- int ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = -EIO;
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
*data = le16_to_cpu(*(__le16 *)data);
@@ -348,7 +348,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_ALL,
"Reading nvm word failed.Error code: %d.\n",
status);
- return I40E_ERR_NVM;
+ return -EIO;
}
}
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
@@ -358,7 +358,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
ptr_value == I40E_NVM_INVALID_VAL) {
i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
- return I40E_ERR_BAD_PTR;
+ return -EINVAL;
}
/* Check whether the module is in SR mapped area or outside */
@@ -367,7 +367,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_ALL,
"Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
- return I40E_ERR_PARAM;
+ return -EINVAL;
} else {
/* Read from the Shadow RAM */
@@ -377,7 +377,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_ALL,
"Reading nvm word failed.Error code: %d.\n",
status);
- return I40E_ERR_NVM;
+ return -EIO;
}
offset = ptr_value + module_offset + specific_ptr +
@@ -549,7 +549,7 @@ static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
bool last_command)
{
struct i40e_asq_cmd_details cmd_details;
- int ret_code = I40E_ERR_NVM;
+ int ret_code = -EIO;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -614,7 +614,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
/* read pointer to VPD area */
ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code) {
- ret_code = I40E_ERR_NVM_CHECKSUM;
+ ret_code = -EIO;
goto i40e_calc_nvm_checksum_exit;
}
@@ -622,7 +622,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code) {
- ret_code = I40E_ERR_NVM_CHECKSUM;
+ ret_code = -EIO;
goto i40e_calc_nvm_checksum_exit;
}
@@ -636,7 +636,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
if (ret_code) {
- ret_code = I40E_ERR_NVM_CHECKSUM;
+ ret_code = -EIO;
goto i40e_calc_nvm_checksum_exit;
}
}
@@ -724,7 +724,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw,
* calculated checksum
*/
if (checksum_local != checksum_sr)
- ret_code = I40E_ERR_NVM_CHECKSUM;
+ ret_code = -EIO;
/* If the user cares, return the calculated checksum */
if (checksum)
@@ -839,7 +839,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
if (upd_cmd == I40E_NVMUPD_STATUS) {
if (!cmd->data_size) {
*perrno = -EFAULT;
- return I40E_ERR_BUF_TOO_SHORT;
+ return -EINVAL;
}
bytes[0] = hw->nvmupd_state;
@@ -896,7 +896,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
break;
}
- status = I40E_ERR_NOT_READY;
+ status = -EBUSY;
*perrno = -EBUSY;
break;
@@ -904,7 +904,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
/* invalid state, should never happen */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: no such state %d\n", hw->nvmupd_state);
- status = I40E_NOT_SUPPORTED;
+ status = -EOPNOTSUPP;
*perrno = -ESRCH;
break;
}
@@ -1045,7 +1045,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
i40e_nvm_update_state_str[upd_cmd]);
- status = I40E_ERR_NVM;
+ status = -EIO;
*perrno = -ESRCH;
break;
}
@@ -1087,7 +1087,7 @@ static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in reading state.\n",
i40e_nvm_update_state_str[upd_cmd]);
- status = I40E_NOT_SUPPORTED;
+ status = -EOPNOTSUPP;
*perrno = -ESRCH;
break;
}
@@ -1174,7 +1174,7 @@ retry:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in writing state.\n",
i40e_nvm_update_state_str[upd_cmd]);
- status = I40E_NOT_SUPPORTED;
+ status = -EOPNOTSUPP;
*perrno = -ESRCH;
break;
}
@@ -1398,7 +1398,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
"NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
cmd->data_size, aq_desc_len);
*perrno = -EINVAL;
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
aq_desc = (struct i40e_aq_desc *)bytes;
@@ -1473,7 +1473,7 @@ static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
__func__, cmd->offset, aq_total_len);
*perrno = -EINVAL;
- return I40E_ERR_PARAM;
+ return -EINVAL;
}
/* check copylength range */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index fe845987d99a..3eeee224f1fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -18,7 +18,6 @@
/* adminq functions */
int i40e_init_adminq(struct i40e_hw *hw);
void i40e_shutdown_adminq(struct i40e_hw *hw);
-void i40e_adminq_init_ring_data(struct i40e_hw *hw);
int i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
@@ -51,7 +50,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
-void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
@@ -117,9 +115,6 @@ int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
int i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
- u64 advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
@@ -269,9 +264,6 @@ int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_bw,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
@@ -350,7 +342,6 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw,
int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
-int i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
int i40e_init_nvm(struct i40e_hw *hw);
@@ -425,14 +416,6 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
/* prototype for functions used for SW locks */
/* i40e_common for VF drivers*/
-void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct virtchnl_vf_resource *msg);
-int i40e_vf_reset(struct i40e_hw *hw);
-int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- int v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index c37abbb3cd06..8a26811140b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1132,7 +1132,7 @@ int i40e_ptp_alloc_pins(struct i40e_pf *pf)
if (!pf->ptp_pins) {
dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n");
- return -I40E_ERR_NO_MEMORY;
+ return -ENOMEM;
}
pf->ptp_pins->sdp3_2 = off;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
deleted file mode 100644
index 4d2782e76038..000000000000
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_STATUS_H_
-#define _I40E_STATUS_H_
-
-/* Error Codes */
-enum i40e_status_code {
- I40E_SUCCESS = 0,
- I40E_ERR_NVM = -1,
- I40E_ERR_NVM_CHECKSUM = -2,
- I40E_ERR_CONFIG = -4,
- I40E_ERR_PARAM = -5,
- I40E_ERR_UNKNOWN_PHY = -7,
- I40E_ERR_INVALID_MAC_ADDR = -10,
- I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_RESET_FAILED = -15,
- I40E_ERR_NO_AVAILABLE_VSI = -17,
- I40E_ERR_NO_MEMORY = -18,
- I40E_ERR_BAD_PTR = -19,
- I40E_ERR_INVALID_SIZE = -26,
- I40E_ERR_QUEUE_EMPTY = -32,
- I40E_ERR_TIMEOUT = -37,
- I40E_ERR_INVALID_SD_INDEX = -45,
- I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
- I40E_ERR_INVALID_SD_TYPE = -47,
- I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
- I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
- I40E_ERR_ADMIN_QUEUE_ERROR = -53,
- I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
- I40E_ERR_BUF_TOO_SHORT = -55,
- I40E_ERR_ADMIN_QUEUE_FULL = -56,
- I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
- I40E_ERR_NVM_BLANK_MODE = -59,
- I40E_ERR_NOT_IMPLEMENTED = -60,
- I40E_ERR_DIAG_TEST_FAILED = -62,
- I40E_ERR_NOT_READY = -63,
- I40E_NOT_SUPPORTED = -64,
- I40E_ERR_FIRMWARE_API_VERSION = -65,
- I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
-};
-
-#endif /* _I40E_STATUS_H_ */
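/*
 * Editor's note (not part of the patch): with i40e_status.h removed, the
 * driver returns plain kernel errnos.  The correspondence visible in the
 * hunks above is roughly: I40E_SUCCESS -> 0; parameter/pointer/index errors
 * (I40E_ERR_PARAM, I40E_ERR_BAD_PTR, I40E_ERR_INVALID_*) -> -EINVAL;
 * I40E_ERR_NO_MEMORY -> -ENOMEM; I40E_ERR_NO_AVAILABLE_VSI -> -ENODEV;
 * I40E_NOT_SUPPORTED / I40E_ERR_NOT_IMPLEMENTED -> -EOPNOTSUPP;
 * I40E_ERR_NOT_READY -> -EBUSY; I40E_ERR_ADMIN_QUEUE_NO_WORK -> -EALREADY;
 * and most NVM/firmware/admin-queue/config failures -> -EIO.  Callers then
 * follow the usual negative-errno convention; a hedged illustration with a
 * made-up helper:
 */
#include <errno.h>
#include <stdio.h>

/* demo_hw_op() stands in for any converted helper; it is not a driver API */
static int demo_hw_op(int ok)
{
	return ok ? 0 : -EIO;
}

int main(void)
{
	int err = demo_hw_op(0);

	if (err)
		fprintf(stderr, "hw op failed: %d\n", err);
	return err ? 1 : 0;
}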
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 8b8bf4880faa..0b3a27f118fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2284,8 +2284,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* If the buffer is an EOP buffer, this function exits returning false,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
-static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 8c3d24012c54..900b0d9ede9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -473,6 +473,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 388c3d36d96a..232131bedc3e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -4,7 +4,6 @@
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
-#include "i40e_status.h"
#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
@@ -1456,7 +1455,7 @@ struct i40e_ddp_version {
struct i40e_package_header {
struct i40e_ddp_version version;
u32 segment_count;
- u32 segment_offset[1];
+ u32 segment_offset[];
};
/* Generic segment header */
@@ -1487,12 +1486,12 @@ struct i40e_profile_segment {
struct i40e_ddp_version version;
char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
- struct i40e_device_id_entry device_table[1];
+ struct i40e_device_id_entry device_table[];
};
struct i40e_section_table {
u32 section_count;
- u32 section_offset[1];
+ u32 section_offset[];
};
struct i40e_profile_section_header {
@@ -1524,7 +1523,7 @@ struct i40e_profile_aq_section {
u16 flags;
u8 param[16];
u16 datalen;
- u8 data[1];
+ u8 data[];
};
struct i40e_profile_info {
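/*
 * Editor's note (not part of the patch): effect of converting the trailing
 * one-element arrays above (segment_offset[1], device_table[1], data[1]) to
 * C99 flexible array members.  With "u32 x[1]" the lone element is counted
 * in sizeof(), so size math had to subtract one entry; with "u32 x[]" it is
 * not, and the element count can be used directly.  Standalone sketch with
 * an illustrative struct, not the driver's:
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct demo_section_table {
	uint32_t section_count;
	uint32_t section_offset[];	/* flexible array member */
};

int main(void)
{
	uint32_t count = 4;
	/* equivalent of the kernel's struct_size(tbl, section_offset, count) */
	size_t len = sizeof(struct demo_section_table) +
		     count * sizeof(uint32_t);
	struct demo_section_table *tbl = calloc(1, len);

	if (!tbl)
		return 1;
	tbl->section_count = count;
	printf("%zu-byte header + %u offsets = %zu bytes\n",
	       sizeof(*tbl), tbl->section_count, len);
	free(tbl);
	return 0;
}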
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index be59ba3774e1..8ea1a238dcef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -506,6 +506,7 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
struct virtchnl_rdma_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
+ size_t size;
u32 msix_vf;
int ret = 0;
@@ -521,9 +522,9 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
}
kfree(vf->qvlist_info);
- vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
- qvlist_info->num_vectors - 1),
- GFP_KERNEL);
+ size = virtchnl_struct_size(vf->qvlist_info, qv_info,
+ qvlist_info->num_vectors);
+ vf->qvlist_info = kzalloc(size, GFP_KERNEL);
if (!vf->qvlist_info) {
ret = -ENOMEM;
goto err_out;
@@ -1346,14 +1347,14 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
bool alluni)
{
struct i40e_pf *pf = vf->pf;
- int aq_ret = I40E_SUCCESS;
struct i40e_vsi *vsi;
+ int aq_ret = 0;
u16 num_vlans;
s16 *vl;
vsi = i40e_find_vsi_from_id(pf, vsi_id);
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
- return I40E_ERR_PARAM;
+ return -EINVAL;
if (vf->port_vlan_id) {
aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
@@ -1363,7 +1364,7 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
if (!vl)
- return I40E_ERR_NO_MEMORY;
+ return -ENOMEM;
aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
vl, num_vlans);
@@ -2037,7 +2038,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
if (VF_IS_V10(&vf->vf_ver))
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
- I40E_SUCCESS, (u8 *)&info,
+ 0, (u8 *)&info,
sizeof(struct virtchnl_version_info));
}
@@ -2099,14 +2100,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
int ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
- len = struct_size(vfres, vsi_res, num_vsis);
+ len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
- aq_ret = I40E_ERR_NO_MEMORY;
+ aq_ret = -ENOMEM;
len = 0;
goto err;
}
@@ -2159,7 +2160,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
@@ -2227,7 +2228,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err_out;
}
if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
@@ -2243,12 +2244,12 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
}
if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err_out;
}
if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err_out;
}
@@ -2315,17 +2316,17 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -2333,7 +2334,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
for (i = 0; i < vf->num_tc; i++)
num_qps_all += vf->ch[i].num_qps;
if (num_qps_all != qci->num_queue_pairs) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
}
@@ -2346,7 +2347,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
if (!vf->adq_enabled) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
qpi->txq.queue_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -2355,14 +2356,14 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != vsi_queue_id) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
}
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
- aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ aq_ret = -ENODEV;
goto error_param;
}
vsi_id = vf->ch[idx].vsi_id;
@@ -2372,7 +2373,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
&qpi->rxq) ||
i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
&qpi->txq)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -2383,7 +2384,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
*/
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
- aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
+ aq_ret = -ENODEV;
goto error_param;
}
if (j == (vf->ch[idx].num_qps - 1)) {
@@ -2406,7 +2407,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
vsi->num_queue_pairs = vf->ch[i].num_qps;
if (i40e_update_adq_vsi_queues(vsi, i)) {
- aq_ret = I40E_ERR_CONFIG;
+ aq_ret = -EIO;
goto error_param;
}
}
@@ -2464,13 +2465,13 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (irqmap_info->num_vectors >
vf->pf->hw.func_caps.num_msix_vectors_vf) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -2479,18 +2480,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
/* validate msg params */
if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
!i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
vsi_id = map->vsi_id;
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -2579,29 +2580,29 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
/* Use the queue bit map sent by the VF */
if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
true)) {
- aq_ret = I40E_ERR_TIMEOUT;
+ aq_ret = -EIO;
goto error_param;
}
if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
true)) {
- aq_ret = I40E_ERR_TIMEOUT;
+ aq_ret = -EIO;
goto error_param;
}
@@ -2610,7 +2611,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
/* zero belongs to LAN VSI */
for (i = 1; i < vf->num_tc; i++) {
if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
- aq_ret = I40E_ERR_TIMEOUT;
+ aq_ret = -EIO;
}
}
@@ -2636,29 +2637,29 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
/* Use the queue bit map sent by the VF */
if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
false)) {
- aq_ret = I40E_ERR_TIMEOUT;
+ aq_ret = -EIO;
goto error_param;
}
if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
false)) {
- aq_ret = I40E_ERR_TIMEOUT;
+ aq_ret = -EIO;
goto error_param;
}
error_param:
@@ -2790,18 +2791,18 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
memset(&stats, 0, sizeof(struct i40e_eth_stats));
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
i40e_update_eth_stats(vsi);
@@ -2862,7 +2863,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
is_zero_ether_addr(addr)) {
dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
addr);
- return I40E_ERR_INVALID_MAC_ADDR;
+ return -EINVAL;
}
/* If the host VMM administrator has set the VF MAC address
@@ -2998,7 +2999,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- ret = I40E_ERR_PARAM;
+ ret = -EINVAL;
goto error_param;
}
@@ -3027,7 +3028,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
+ ret = -EINVAL;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
@@ -3067,7 +3068,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- ret = I40E_ERR_PARAM;
+ ret = -EINVAL;
goto error_param;
}
@@ -3076,7 +3077,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
is_zero_ether_addr(al->list[i].addr)) {
dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
al->list[i].addr, vf->vf_id);
- ret = I40E_ERR_INVALID_MAC_ADDR;
+ ret = -EINVAL;
goto error_param;
}
if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
@@ -3088,7 +3089,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
- ret = I40E_ERR_INVALID_MAC_ADDR;
+ ret = -EINVAL;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
@@ -3149,13 +3150,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
}
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
dev_err(&pf->pdev->dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
goto error_param;
@@ -3163,7 +3164,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -3214,13 +3215,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
}
@@ -3228,7 +3229,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
if (vfl->num_elements > 1 || vfl->vlan_id[0])
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -3269,7 +3270,7 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
@@ -3298,13 +3299,13 @@ static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto error_param;
}
if (config) {
if (i40e_config_rdma_qvlist(vf, qvlist_info))
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
} else {
i40e_release_rdma_qvlist(vf);
}
@@ -3335,7 +3336,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3366,13 +3367,13 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
for (i = 0; i < vrl->lut_entries; i++)
if (vrl->lut[i] >= vf->num_queue_pairs) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3399,14 +3400,14 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
int len = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
len = sizeof(struct virtchnl_rss_hena);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
- aq_ret = I40E_ERR_NO_MEMORY;
+ aq_ret = -ENOMEM;
len = 0;
goto err;
}
@@ -3435,7 +3436,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
@@ -3460,7 +3461,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3486,7 +3487,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3574,7 +3575,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
dev_err(&pf->pdev->dev,
"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
vf->vf_id);
- return I40E_ERR_CONFIG;
+ return -EIO;
}
}
@@ -3627,9 +3628,9 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
}
}
- return I40E_SUCCESS;
+ return 0;
err:
- return I40E_ERR_CONFIG;
+ return -EIO;
}
/**
@@ -3713,7 +3714,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3721,7 +3722,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_info(&pf->pdev->dev,
"VF %d: ADq not enabled, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3729,7 +3730,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_info(&pf->pdev->dev,
"VF %d: Invalid input, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3844,7 +3845,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err_out;
}
@@ -3852,7 +3853,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_info(&pf->pdev->dev,
"VF %d: ADq is not enabled, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err_out;
}
@@ -3860,7 +3861,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
dev_info(&pf->pdev->dev,
"VF %d: Invalid input/s, can't apply cloud filter\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err_out;
}
@@ -3953,7 +3954,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
u64 speed = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3961,7 +3962,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
if (vf->spoofchk) {
dev_err(&pf->pdev->dev,
"Spoof check is ON, turn it OFF to enable ADq\n");
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3969,7 +3970,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3978,7 +3979,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -3990,7 +3991,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
vf->vf_id, i, tci->list[i].count,
I40E_DEFAULT_QUEUES_PER_VF);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -4001,7 +4002,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev,
"No queues left to allocate to VF %d\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
} else {
/* we need to allocate max VF queues to enable ADq so as to
@@ -4016,7 +4017,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
if (speed == SPEED_UNKNOWN) {
dev_err(&pf->pdev->dev,
"Cannot detect link speed\n");
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -4029,7 +4030,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
"Invalid max tx rate %llu specified for VF %d.",
tci->list[i].max_tx_rate,
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
} else {
vf->ch[i].max_tx_rate =
@@ -4045,7 +4046,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
/* reset the VF in order to allocate resources */
i40e_vc_reset_vf(vf, true);
- return I40E_SUCCESS;
+ return 0;
/* send the response to the VF */
err:
@@ -4064,7 +4065,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
goto err;
}
@@ -4079,13 +4080,13 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
} else {
dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
vf->vf_id);
- aq_ret = I40E_ERR_PARAM;
+ aq_ret = -EINVAL;
}
/* reset the VF in order to allocate resources */
i40e_vc_reset_vf(vf, true);
- return I40E_SUCCESS;
+ return 0;
err:
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
@@ -4119,21 +4120,16 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
/* Check if VF is disabled. */
if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
- return I40E_ERR_PARAM;
+ return -EINVAL;
/* perform basic checks on the msg */
ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (ret) {
- i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+ i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
- switch (ret) {
- case VIRTCHNL_STATUS_ERR_PARAM:
- return -EPERM;
- default:
- return -EINVAL;
- }
+ return ret;
}
switch (v_opcode) {
@@ -4226,7 +4222,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
- I40E_ERR_NOT_IMPLEMENTED);
+ -EOPNOTSUPP);
break;
}
@@ -4305,6 +4301,38 @@ err_out:
}
/**
+ * i40e_check_vf_init_timeout
+ * @vf: the virtual function
+ *
+ * Check that the VF's initialization has completed and, if it has not,
+ * wait up to 300 ms for it to finish.
+ *
+ * Returns true when VF is initialized, false on timeout
+ **/
+static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
+{
+ int i;
+
+ /* When the VF is resetting wait until it is done.
+ * It can take up to 200 milliseconds, but wait for
+ * up to 300 milliseconds to be safe.
+ */
+ for (i = 0; i < 15; i++) {
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+ return true;
+ msleep(20);
+ }
+
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ dev_err(&vf->pf->pdev->dev,
+ "VF %d still in reset. Try again.\n", vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* i40e_ndo_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -4322,7 +4350,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int ret = 0;
struct hlist_node *h;
int bkt;
- u8 i;
if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
@@ -4335,21 +4362,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
vf = &pf->vf[vf_id];
-
- /* When the VF is resetting wait until it is done.
- * It can take up to 200 milliseconds,
- * but wait for up to 300 milliseconds to be safe.
- * Acquire the VSI pointer only after the VF has been
- * properly initialized.
- */
- for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
- break;
- msleep(20);
- }
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto error_param;
}
@@ -4451,13 +4464,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
}
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto error_pvid;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (le16_to_cpu(vsi->info.pvid) == vlanprio)
/* duplicate request, so just return success */
@@ -4601,13 +4612,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto error;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
@@ -4774,9 +4783,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
}
vf = &(pf->vf[vf_id]);
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
- dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
- vf_id);
+ if (!i40e_check_vf_init_timeout(vf)) {
ret = -EAGAIN;
goto out;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 05ec1181471e..37f41c8a682f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -294,8 +294,14 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
{
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo = NULL;
struct sk_buff *skb;
+ u32 nr_frags = 0;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
@@ -312,6 +318,28 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
__skb_pull(skb, metasize);
}
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ for (int i = 0; i < nr_frags; i++) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct page *page;
+ void *addr;
+
+ page = dev_alloc_page();
+ if (!page) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ addr = page_to_virt(page);
+
+ memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
+
+ __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
+ addr, 0, skb_frag_size(frag));
+ }
+
out:
xsk_buff_free(xdp);
return skb;
@@ -322,14 +350,13 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
unsigned int *rx_packets,
unsigned int *rx_bytes,
- unsigned int size,
unsigned int xdp_res,
bool *failure)
{
struct sk_buff *skb;
*rx_packets = 1;
- *rx_bytes = size;
+ *rx_bytes = xdp_get_buff_len(xdp_buff);
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
return;
@@ -363,7 +390,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
return;
}
- *rx_bytes = skb->len;
i40e_process_skb_fields(rx_ring, rx_desc, skb);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
return;
@@ -374,6 +400,31 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
+static int
+i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
+ struct xdp_buff *xdp, const unsigned int size)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
+
+ if (!xdp_buff_has_frags(first)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(first);
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ xsk_buff_free(first);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+ virt_to_page(xdp->data_hard_start), 0, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+ return 0;
+}
+
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -384,13 +435,18 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 next_to_process = rx_ring->next_to_process;
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
+ struct xdp_buff *first = NULL;
struct bpf_prog *xdp_prog;
bool failure = false;
u16 cleaned_count;
+ if (next_to_process != next_to_clean)
+ first = *i40e_rx_bi(rx_ring, next_to_clean);
+
/* NB! xdp_prog will always be !NULL, due to the fact that
* this path is enabled by setting an XDP program.
*/
@@ -404,7 +460,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int size;
u64 qword;
- rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
+ rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
@@ -417,9 +473,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
- bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_free(bi);
- next_to_clean = (next_to_clean + 1) & count_mask;
+ next_to_process = (next_to_process + 1) & count_mask;
continue;
}
@@ -428,22 +484,35 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!size)
break;
- bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
- xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
- i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
- &rx_bytes, size, xdp_res, &failure);
+ if (!first)
+ first = bi;
+ else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ break;
+
+ next_to_process = (next_to_process + 1) & count_mask;
+
+ if (i40e_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
+ i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
+ &rx_bytes, xdp_res, &failure);
+ first->flags = 0;
+ next_to_clean = next_to_process;
if (failure)
break;
total_rx_packets += rx_packets;
total_rx_bytes += rx_bytes;
xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
- next_to_clean = (next_to_clean + 1) & count_mask;
+ first = NULL;
}
rx_ring->next_to_clean = next_to_clean;
+ rx_ring->next_to_process = next_to_process;
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
@@ -466,6 +535,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
unsigned int *total_bytes)
{
+ u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
struct i40e_tx_desc *tx_desc;
dma_addr_t dma;
@@ -474,8 +544,7 @@ static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
- 0, desc->len, 0);
+ tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);
*total_bytes += desc->len;
}
@@ -489,14 +558,14 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
u32 i;
loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
+
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
- I40E_TX_DESC_CMD_EOP,
- 0, desc[i].len, 0);
+ tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0);
*total_bytes += desc[i].len;
}
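/*
 * Editor's note (not part of the patch): a schematic of the AF_XDP
 * zero-copy multi-buffer Rx flow the hunks above introduce.  The ring is
 * now walked with next_to_process; the first buffer of a packet is
 * remembered, later buffers are attached to it as frags
 * (i40e_add_xsk_frag()), and only on the EOP descriptor (i40e_is_non_eop()
 * returning false) is the assembled xdp_buff run through the XDP program
 * and accounted.  On the Tx side xsk_is_eop_desc() sets EOP per descriptor
 * instead of unconditionally, and the i40e_main.c hunk advertises
 * netdev->xdp_zc_max_segs.  The loop below is a standalone illustration
 * with made-up types, not driver code.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_rx_buf {
	bool eop;			/* end-of-packet descriptor? */
	struct demo_rx_buf *next_frag;
};

static void demo_run_xdp(struct demo_rx_buf *pkt) { (void)pkt; }

static void demo_clean_rx(struct demo_rx_buf *ring, size_t n)
{
	struct demo_rx_buf *first = NULL, *tail = NULL;

	for (size_t i = 0; i < n; i++) {
		struct demo_rx_buf *cur = &ring[i];

		if (!first) {
			first = tail = cur;	/* start of a packet */
		} else {
			tail->next_frag = cur;	/* append as a frag */
			tail = cur;
		}

		if (!cur->eop)
			continue;		/* keep gathering buffers */

		demo_run_xdp(first);		/* whole packet is ready */
		first = NULL;
	}
}

int main(void)
{
	struct demo_rx_buf ring[3] = { {false, NULL}, {false, NULL}, {true, NULL} };

	demo_clean_rx(ring, 3);
	return 0;
}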
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 8cbdebc5b698..85fba85fbb23 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -92,9 +92,9 @@ struct iavf_vsi {
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
#define IAVF_MBPS_QUANTA 50
-#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
- (IAVF_MAX_VF_VSI * \
- sizeof(struct virtchnl_vsi_resource)))
+#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE \
+ virtchnl_struct_size((struct virtchnl_vf_resource *)NULL, \
+ vsi_res, IAVF_MAX_VF_VSI)
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
index 93c903c02c64..e6051b6355aa 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c
@@ -469,8 +469,8 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
}
v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info;
- msg_size = struct_size(v_qvlist_info, qv_info,
- v_qvlist_info->num_vectors - 1);
+ msg_size = virtchnl_struct_size(v_qvlist_info, qv_info,
+ v_qvlist_info->num_vectors);
adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
index c5d51d7dc7cc..500269bc0f5b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.h
@@ -53,7 +53,7 @@ struct iavf_qv_info {
struct iavf_qvlist_info {
u32 num_vectors;
- struct iavf_qv_info qv_info[1];
+ struct iavf_qv_info qv_info[];
};
#define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 9610ca770349..7b300c86ceda 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3744,15 +3744,15 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
struct virtchnl_filter *vf = &filter->f;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
- dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
+ dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index be3c007ce90a..f9727e9c3d63 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -215,8 +215,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
u16 len;
int err;
- len = sizeof(struct virtchnl_vf_resource) +
- IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+ len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
event.buf_len = len;
event.msg_buf = kzalloc(len, GFP_KERNEL);
if (!event.msg_buf)
@@ -284,7 +283,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
return;
}
adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
- len = struct_size(vqci, qpair, pairs);
+ len = virtchnl_struct_size(vqci, qpair, pairs);
vqci = kzalloc(len, GFP_KERNEL);
if (!vqci)
return;
@@ -397,7 +396,7 @@ void iavf_map_queues(struct iavf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
+ len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
vimi = kzalloc(len, GFP_KERNEL);
if (!vimi)
return;
@@ -476,13 +475,11 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- len = struct_size(veal, list, count);
+ len = virtchnl_struct_size(veal, list, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_ether_addr_list)) /
- sizeof(struct virtchnl_ether_addr);
- len = struct_size(veal, list, count);
+ while (len > IAVF_MAX_AQ_BUF_SIZE)
+ len = virtchnl_struct_size(veal, list, --count);
more = true;
}
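/*
 * Editor's note (not part of the patch): the hunks above (and the matching
 * MAC/VLAN ones below) replace the closed-form "count = (max - header) /
 * elem" clamp with a loop that simply shrinks the element count until the
 * flexible-array message fits the admin-queue buffer limit.  A hedged,
 * standalone illustration with made-up sizes and names:
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_MAX_AQ_BUF	4096u

struct demo_ether_addr_list {
	uint32_t num_elements;
	struct { uint8_t addr[6]; uint8_t type; uint8_t pad; } list[];
};

#define demo_struct_size(type, member, n) \
	(sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

int main(void)
{
	size_t count = 1000;
	size_t len = demo_struct_size(struct demo_ether_addr_list, list, count);

	while (len > DEMO_MAX_AQ_BUF)	/* trim until the message fits */
		len = demo_struct_size(struct demo_ether_addr_list, list, --count);

	printf("sending %zu of 1000 addresses in %zu bytes\n", count, len);
	return 0;
}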
@@ -547,13 +544,11 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
- len = struct_size(veal, list, count);
+ len = virtchnl_struct_size(veal, list, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_ether_addr_list)) /
- sizeof(struct virtchnl_ether_addr);
- len = struct_size(veal, list, count);
+ while (len > IAVF_MAX_AQ_BUF_SIZE)
+ len = virtchnl_struct_size(veal, list, --count);
more = true;
}
veal = kzalloc(len, GFP_ATOMIC);
@@ -687,12 +682,12 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
- len = sizeof(*vvfl) + (count * sizeof(u16));
+ len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
- sizeof(u16);
- len = sizeof(*vvfl) + (count * sizeof(u16));
+ while (len > IAVF_MAX_AQ_BUF_SIZE)
+ len = virtchnl_struct_size(vvfl, vlan_id,
+ --count);
more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC);
@@ -732,15 +727,12 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
more = true;
}
- len = sizeof(*vvfl_v2) + ((count - 1) *
- sizeof(struct virtchnl_vlan_filter));
+ len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
- sizeof(struct virtchnl_vlan_filter);
- len = sizeof(*vvfl_v2) +
- ((count - 1) *
- sizeof(struct virtchnl_vlan_filter));
+ while (len > IAVF_MAX_AQ_BUF_SIZE)
+ len = virtchnl_struct_size(vvfl_v2, filters,
+ --count);
more = true;
}
@@ -838,12 +830,12 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
- len = sizeof(*vvfl) + (count * sizeof(u16));
+ len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
- sizeof(u16);
- len = sizeof(*vvfl) + (count * sizeof(u16));
+ while (len > IAVF_MAX_AQ_BUF_SIZE)
+ len = virtchnl_struct_size(vvfl, vlan_id,
+ --count);
more = true;
}
vvfl = kzalloc(len, GFP_ATOMIC);
@@ -884,16 +876,12 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
- len = sizeof(*vvfl_v2) +
- ((count - 1) * sizeof(struct virtchnl_vlan_filter));
+ len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(*vvfl_v2)) /
- sizeof(struct virtchnl_vlan_filter);
- len = sizeof(*vvfl_v2) +
- ((count - 1) *
- sizeof(struct virtchnl_vlan_filter));
+ while (len > IAVF_MAX_AQ_BUF_SIZE)
+ len = virtchnl_struct_size(vvfl_v2, filters,
+ --count);
more = true;
}
@@ -1085,8 +1073,7 @@ void iavf_set_rss_key(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- len = sizeof(struct virtchnl_rss_key) +
- (adapter->rss_key_size * sizeof(u8)) - 1;
+ len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
vrk = kzalloc(len, GFP_KERNEL);
if (!vrk)
return;
@@ -1117,8 +1104,7 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- len = sizeof(struct virtchnl_rss_lut) +
- (adapter->rss_lut_size * sizeof(u8)) - 1;
+ len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
vrl = kzalloc(len, GFP_KERNEL);
if (!vrl)
return;
@@ -1499,7 +1485,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
return;
}
- len = struct_size(vti, list, adapter->num_tc - 1);
+ len = virtchnl_struct_size(vti, list, adapter->num_tc);
vti = kzalloc(len, GFP_KERNEL);
if (!vti)
return;
@@ -2175,9 +2161,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
break;
case VIRTCHNL_OP_GET_VF_RESOURCES: {
- u16 len = sizeof(struct virtchnl_vf_resource) +
- IAVF_MAX_VF_VSI *
- sizeof(struct virtchnl_vsi_resource);
+ u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
+
memcpy(adapter->vf_res, msg, min(msglen, len));
iavf_validate_num_queues(adapter);
iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
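
All of the iavf_virtchnl.c hunks replace open-coded "header plus n elements" size arithmetic with virtchnl_struct_size() and, when a message would overflow IAVF_MAX_AQ_BUF_SIZE, shrink the element count one entry at a time until it fits rather than recomputing it from the buffer limit. A hedged sketch of that shrink-to-fit idiom using the generic struct_size() helper; the structure and limit below are made up for illustration:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

#define EXAMPLE_MAX_BUF_SIZE 4096	/* stand-in for IAVF_MAX_AQ_BUF_SIZE */

struct example_list {
	u16 num_elements;
	u16 element[];			/* flexible array, like vvfl->vlan_id */
};

/* Sketch: cap a variable-length message to the buffer limit by dropping
 * trailing elements, then allocate exactly the resulting size.
 */
static struct example_list *example_alloc_capped(u16 *count)
{
	struct example_list *list;
	size_t len;

	len = struct_size(list, element, *count);
	while (len > EXAMPLE_MAX_BUF_SIZE)
		len = struct_size(list, element, --(*count));

	list = kzalloc(len, GFP_KERNEL);
	if (list)
		list->num_elements = *count;
	return list;
}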
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 817977e3039d..960277d78e09 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -47,5 +47,5 @@ ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
-ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4ba3d99439a0..5022b036ca4f 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -200,6 +200,8 @@ enum ice_feature {
ICE_F_PTP_EXTTS,
ICE_F_SMA_CTRL,
ICE_F_GNSS,
+ ICE_F_ROCE_LAG,
+ ICE_F_SRIOV_LAG,
ICE_F_MAX
};
@@ -370,6 +372,7 @@ struct ice_vsi {
u16 rx_buf_len;
struct ice_aqc_vsi_props info; /* VSI properties */
+ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
/* VSI stats */
struct rtnl_link_stats64 net_stats;
@@ -517,6 +520,7 @@ enum ice_misc_thread_tasks {
struct ice_switchdev_info {
struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
+ struct ice_esw_br_offloads *br_offloads;
bool is_running;
};
@@ -567,6 +571,7 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
struct mutex adev_mutex; /* lock to protect aux device access */
+ struct mutex lag_mutex; /* protect ice_lag struct in PF */
u32 msg_enable;
struct ice_ptp ptp;
struct gnss_serial *gnss_serial;
@@ -626,6 +631,7 @@ struct ice_pf {
struct ice_lag *lag; /* Link Aggregation information */
struct ice_switchdev_info switchdev;
+ struct ice_esw_br_port *br_port;
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
@@ -636,6 +642,8 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};
+extern struct workqueue_struct *ice_lag_wq;
+
struct ice_netdev_priv {
struct ice_vsi *vsi;
struct ice_repr *repr;
@@ -853,7 +861,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-bool netif_is_ice(struct net_device *dev);
+bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
@@ -909,8 +917,25 @@ void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
-int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
- struct ice_rq_event_info *event);
+
+enum ice_aq_task_state {
+ ICE_AQ_TASK_NOT_PREPARED,
+ ICE_AQ_TASK_WAITING,
+ ICE_AQ_TASK_COMPLETE,
+ ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+ struct hlist_node entry;
+ struct ice_rq_event_info event;
+ enum ice_aq_task_state state;
+ u16 opcode;
+};
+
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ u16 opcode);
+int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ unsigned long timeout);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
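
The header change above splits the old single-call wait into a two-step API: the caller registers an on-stack struct ice_aq_task for the expected opcode before issuing the AdminQ command, then waits for completion, so a fast firmware response can no longer race with the registration. A usage sketch under exactly those declarations; the command issued in the middle is a placeholder for whatever AQ request the caller actually sends:

#include "ice.h"

/* Sketch only: the intended calling pattern for the new prepare/wait API. */
static int example_send_and_wait(struct ice_pf *pf, u16 completion_opcode)
{
	struct ice_aq_task task;

	/* Register interest in the opcode before firing the command. */
	ice_aq_prep_for_event(pf, &task, completion_opcode);

	/* ... send the AdminQ command that produces the event here ... */

	/* Block until firmware posts the event or the timeout expires. */
	return ice_aq_wait_for_event(pf, &task, HZ);
}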
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 63d3e1dcbba5..29f7a9852aec 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -120,6 +120,9 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
+#define ICE_AQC_BIT_ROCEV2_LAG 0x01
+#define ICE_AQC_BIT_SRIOV_LAG 0x02
u8 major_ver;
u8 minor_ver;
@@ -232,6 +235,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
__le16 bad_frame_vsi;
__le16 swid;
+#define ICE_AQC_PORT_SWID_VALID BIT(15)
+#define ICE_AQC_PORT_SWID_M 0xFF
u8 reserved[10];
};
@@ -241,10 +246,12 @@ struct ice_aqc_set_port_params {
* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
* Get Allocated Resource Descriptors Command (indirect 0x020A)
+ * Share Resource command (indirect 0x020B)
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
#define ICE_AQC_RES_TYPE_RECIPE 0x05
+#define ICE_AQC_RES_TYPE_SWID 0x07
#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
@@ -264,6 +271,7 @@ struct ice_aqc_set_port_params {
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
+ * Share Resource command (indirect 0x020B)
*/
struct ice_aqc_alloc_free_res_cmd {
__le16 num_entries; /* Number of Resource entries */
@@ -818,7 +826,11 @@ struct ice_aqc_txsched_move_grp_info_hdr {
__le32 src_parent_teid;
__le32 dest_parent_teid;
__le16 num_elems;
- __le16 reserved;
+ u8 mode;
+#define ICE_AQC_MOVE_ELEM_MODE_SAME_PF 0x0
+#define ICE_AQC_MOVE_ELEM_MODE_GIVE_OWN 0x1
+#define ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN 0x2
+ u8 reserved;
};
struct ice_aqc_move_elem {
@@ -1392,6 +1404,7 @@ struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31
u8 rsvd[9];
};
@@ -1781,11 +1794,10 @@ struct ice_aqc_lldp_filter_ctrl {
u8 reserved2[12];
};
+#define ICE_AQC_RSS_VSI_VALID BIT(15)
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
-#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
-#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S)
__le16 vsi_id;
u8 reserved[6];
__le32 addr_high;
@@ -1803,35 +1815,33 @@ struct ice_aqc_get_set_rss_keys {
u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE];
};
-/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut {
-#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
- __le16 vsi_id;
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
- (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
-
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2
+enum ice_lut_type {
+ ICE_LUT_VSI = 0,
+ ICE_LUT_PF = 1,
+ ICE_LUT_GLOBAL = 2,
+};
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \
- (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
+enum ice_lut_size {
+ ICE_LUT_VSI_SIZE = 64,
+ ICE_LUT_GLOBAL_SIZE = 512,
+ ICE_LUT_PF_SIZE = 2048,
+};
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2
+/* enum ice_aqc_lut_flags combines the constants used to fill
+ * &ice_aqc_get_set_rss_lut::flags, which is an amalgamation of the global LUT
+ * ID, LUT size and LUT type, the last of which needs neither shift nor mask.
+ */
+enum ice_aqc_lut_flags {
+ ICE_AQC_LUT_SIZE_SMALL = 0, /* size = 64 or 128 */
+ ICE_AQC_LUT_SIZE_512 = BIT(2),
+ ICE_AQC_LUT_SIZE_2K = BIT(3),
-#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4
-#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \
- (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S)
+ ICE_AQC_LUT_GLOBAL_IDX = GENMASK(7, 4),
+};
+/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
+struct ice_aqc_get_set_rss_lut {
+ __le16 vsi_id;
__le16 flags;
__le32 reserved;
__le32 addr_high;
@@ -1923,6 +1933,42 @@ struct ice_aqc_dis_txq_item {
__le16 q_id[];
} __packed;
+/* Move/Reconfigure Tx queue (indirect 0x0C32) */
+struct ice_aqc_cfg_txqs {
+ u8 cmd_type;
+#define ICE_AQC_Q_CFG_MOVE_NODE 0x1
+#define ICE_AQC_Q_CFG_TC_CHNG 0x2
+#define ICE_AQC_Q_CFG_MOVE_TC_CHNG 0x3
+#define ICE_AQC_Q_CFG_SUBSEQ_CALL BIT(2)
+#define ICE_AQC_Q_CFG_FLUSH BIT(3)
+ u8 num_qs;
+ u8 port_num_chng;
+#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
+#define ICE_AQC_Q_CFG_DST_PRT_S 3
+#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
+ u8 time_out;
+#define ICE_AQC_Q_CFG_TIMEOUT_S 2
+#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
+ __le32 blocked_cgds;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Per Q struct for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */
+struct ice_aqc_cfg_txq_perq {
+ __le16 q_handle;
+ u8 tc;
+ u8 rsvd;
+ __le32 q_teid;
+};
+
+/* The buffer for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */
+struct ice_aqc_cfg_txqs_buf {
+ __le32 src_parent_teid;
+ __le32 dst_parent_teid;
+ struct ice_aqc_cfg_txq_perq queue_info[];
+};
+
/* Add Tx RDMA Queue Set (indirect 0x0C33) */
struct ice_aqc_add_rdma_qset {
u8 num_qset_grps;
@@ -2181,6 +2227,7 @@ struct ice_aq_desc {
struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
+ struct ice_aqc_cfg_txqs cfg_txqs;
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
@@ -2263,6 +2310,7 @@ enum ice_adminq_opc {
/* Alloc/Free/Get Resources */
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_share_res = 0x020B,
ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
@@ -2356,6 +2404,7 @@ enum ice_adminq_opc {
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+ ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
/* package commands */
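
With the ICE_AQC_GSET_RSS_LUT_* shift/mask macros removed, the flags word of the Get/Set RSS LUT descriptor is now composed directly from enum ice_lut_type, one of the enum ice_aqc_lut_flags size bits and, for global LUTs, the index packed with FIELD_PREP(). A small sketch of that composition, mirroring the __ice_aq_get_set_rss_lut() rewrite further down; the helper name is illustrative:

#include <linux/bitfield.h>

/* Sketch: build the 16-bit flags for struct ice_aqc_get_set_rss_lut from
 * the new enums. The LUT type needs neither shift nor mask, the size maps
 * to one of the size flags, and a global LUT also carries its index.
 */
static u16 example_rss_lut_flags(enum ice_lut_type type, u16 global_lut_id)
{
	u16 flags = type;

	switch (type) {
	case ICE_LUT_GLOBAL:
		flags |= ICE_AQC_LUT_SIZE_512;
		flags |= FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, global_lut_id);
		break;
	case ICE_LUT_PF:
		flags |= ICE_AQC_LUT_SIZE_2K;
		break;
	case ICE_LUT_VSI:
		flags |= ICE_AQC_LUT_SIZE_SMALL;	/* zero, kept explicit */
		break;
	}

	return flags;
}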
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 074bf9403cd1..7fa43827a3f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -408,7 +408,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
*/
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
- int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
@@ -473,17 +472,11 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
rlan_ctx.showiv = 0;
- /* For AF_XDP ZC, we disallow packets to span on
- * multiple buffers, thus letting us skip that
- * handling in the fast-path.
- */
- if (ring->xsk_pool)
- chain_len = 1;
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
- chain_len * ring->rx_buf_len);
+ ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e16d4c83ed5f..80deca45ab59 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -5,6 +5,7 @@
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
+#include "ice_ptp_hw.h"
#define ICE_PF_RESET_WAIT_COUNT 300
@@ -1999,37 +2000,31 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
/**
* ice_aq_alloc_free_res - command to allocate/free resources
* @hw: pointer to the HW struct
- * @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
* @opc: pass in the command opcode
- * @cd: pointer to command details structure or NULL
*
* Helper function to allocate/free resources using the admin queue commands
*/
-int
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+int ice_aq_alloc_free_res(struct ice_hw *hw,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.sw_res_ctrl;
- if (!buf)
- return -EINVAL;
-
- if (buf_size < flex_array_size(buf, elem, num_entries))
+ if (!buf || buf_size < flex_array_size(buf, elem, 1))
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- cmd->num_entries = cpu_to_le16(num_entries);
+ cmd->num_entries = cpu_to_le16(1);
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
}
/**
@@ -2059,8 +2054,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
if (btm)
buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
- status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
goto ice_alloc_res_exit;
@@ -2094,8 +2088,7 @@ int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
buf->res_type = cpu_to_le16(type);
memcpy(buf->elem, res, sizeof(*buf->elem) * num);
- status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
- ice_aqc_opc_free_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
if (status)
ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
@@ -2241,6 +2234,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
+ case ICE_AQC_CAPS_FW_LAG_SUPPORT:
+ caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+ ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
+ prefix, caps->roce_lag);
+ caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
+ prefix, caps->sriov_lag);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2654,6 +2655,67 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_aq_get_netlist_node
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ */
+static int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return -EIO;
+
+ if (node_handle)
+ *node_handle = le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ice_is_pf_c827 - check if pf contains c827 phy
+ * @hw: pointer to the hw struct
+ */
+bool ice_is_pf_c827(struct ice_hw *hw)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
+ return true;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return false;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
+ return true;
+
+ return false;
+}
+
+/**
* ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
* @buf: a buffer to hold the capabilities
@@ -3869,6 +3931,34 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return status;
}
+static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
+{
+ switch (type) {
+ case ICE_LUT_VSI:
+ return ICE_LUT_VSI_SIZE;
+ case ICE_LUT_GLOBAL:
+ return ICE_LUT_GLOBAL_SIZE;
+ case ICE_LUT_PF:
+ return ICE_LUT_PF_SIZE;
+ }
+ WARN_ONCE(1, "incorrect type passed");
+ return ICE_LUT_VSI_SIZE;
+}
+
+static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
+{
+ switch (size) {
+ case ICE_LUT_VSI_SIZE:
+ return ICE_AQC_LUT_SIZE_SMALL;
+ case ICE_LUT_GLOBAL_SIZE:
+ return ICE_AQC_LUT_SIZE_512;
+ case ICE_LUT_PF_SIZE:
+ return ICE_AQC_LUT_SIZE_2K;
+ }
+ WARN_ONCE(1, "incorrect size passed");
+ return 0;
+}
+
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
@@ -3878,95 +3968,44 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
static int
-__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
-{
- u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
- struct ice_aqc_get_set_rss_lut *cmd_resp;
+__ice_aq_get_set_rss_lut(struct ice_hw *hw,
+ struct ice_aq_get_set_rss_lut_params *params, bool set)
+{
+ u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
+ enum ice_lut_type lut_type = params->lut_type;
+ struct ice_aqc_get_set_rss_lut *desc_params;
+ enum ice_aqc_lut_flags flags;
+ enum ice_lut_size lut_size;
struct ice_aq_desc desc;
- int status;
- u8 *lut;
+ u8 *lut = params->lut;
- if (!params)
- return -EINVAL;
-
- vsi_handle = params->vsi_handle;
- lut = params->lut;
- if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
- lut_size = params->lut_size;
- lut_type = params->lut_type;
- glob_lut_idx = params->global_lut_id;
- vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
-
- cmd_resp = &desc.params.get_set_rss_lut;
+ lut_size = ice_lut_type_to_size(lut_type);
+ if (lut_size > params->lut_size)
+ return -EINVAL;
+ else if (set && lut_size != params->lut_size)
+ return -EINVAL;
- if (set) {
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
+ opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ if (set)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- } else {
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
- }
-
- cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
- ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
- ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
- ICE_AQC_GSET_RSS_LUT_VSI_VALID);
-
- switch (lut_type) {
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
- flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
- break;
- default:
- status = -EINVAL;
- goto ice_aq_get_set_rss_lut_exit;
- }
-
- if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
- flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
- ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
- if (!set)
- goto ice_aq_get_set_rss_lut_send;
- } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
- if (!set)
- goto ice_aq_get_set_rss_lut_send;
- } else {
- goto ice_aq_get_set_rss_lut_send;
- }
+ desc_params = &desc.params.get_set_rss_lut;
+ vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
- /* LUT size is only valid for Global and PF table types */
- switch (lut_size) {
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
- break;
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- break;
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
- if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- break;
- }
- fallthrough;
- default:
- status = -EINVAL;
- goto ice_aq_get_set_rss_lut_exit;
- }
+ if (lut_type == ICE_LUT_GLOBAL)
+ glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
+ params->global_lut_id);
-ice_aq_get_set_rss_lut_send:
- cmd_resp->flags = cpu_to_le16(flags);
- status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
+ flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
+ desc_params->flags = cpu_to_le16(flags);
-ice_aq_get_set_rss_lut_exit:
- return status;
+ return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}
/**
@@ -4008,12 +4047,10 @@ static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
struct ice_aqc_get_set_rss_keys *key, bool set)
{
- struct ice_aqc_get_set_rss_key *cmd_resp;
+ struct ice_aqc_get_set_rss_key *desc_params;
u16 key_size = sizeof(*key);
struct ice_aq_desc desc;
- cmd_resp = &desc.params.get_set_rss_key;
-
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -4021,10 +4058,8 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
}
- cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
- ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
- ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
- ICE_AQC_GSET_RSS_KEY_VSI_VALID);
+ desc_params = &desc.params.get_set_rss_key;
+ desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}
@@ -4222,6 +4257,53 @@ do_aq:
}
/**
+ * ice_aq_cfg_lan_txq
+ * @hw: pointer to the hardware structure
+ * @buf: buffer for command
+ * @buf_size: size of buffer in bytes
+ * @num_qs: number of queues being configured
+ * @oldport: origination lport
+ * @newport: destination lport
+ * @cd: pointer to command details structure or NULL
+ *
+ * Move/Configure LAN Tx queue (0x0C32)
+ *
+ * There is a better AQ command to use for moving nodes, so this one is
+ * implemented only for reconfiguring the node.
+ */
+int
+ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
+ u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_cfg_txqs *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ cmd = &desc.params.cfg_txqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (!buf)
+ return -EINVAL;
+
+ cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
+ cmd->num_qs = num_qs;
+ cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
+ cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
+ ICE_AQC_Q_CFG_DST_PRT_M;
+ cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
+ ICE_AQC_Q_CFG_TIMEOUT_M;
+ cmd->blocked_cgds = 0;
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
+ hw->adminq.sq_last_status);
+ return status;
+}
+
+/**
* ice_aq_add_rdma_qsets
* @hw: pointer to the hardware structure
* @num_qset_grps: Number of RDMA Qset groups
@@ -4700,6 +4782,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
break;
ice_free_sched_node(pi, node);
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
+ q_ctx->q_teid = ICE_INVAL_TEID;
}
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
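
ice_aq_alloc_free_res() above is reduced to the single-entry form every in-tree caller already used, dropping the num_entries and command-details parameters. A hedged sketch of a caller built on that simplified signature, loosely following ice_alloc_hw_res(); the SWID resource type is chosen only as an example:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch: request one resource descriptor of an example type and return
 * its ID. Error handling is trimmed for brevity.
 */
static int example_alloc_one_res(struct ice_hw *hw, u16 *res_id)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len = struct_size(buf, elem, 1);
	int status;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->num_elems = cpu_to_le16(1);
	buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_SWID);

	status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
	if (!status)
		*res_id = le16_to_cpu(buf->elem[0].e.sw_resp);

	kfree(buf);
	return status;
}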
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 81961a7d6598..226b81f97a92 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -38,10 +38,9 @@ int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-int
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+int ice_aq_alloc_free_res(struct ice_hw *hw,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc);
bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
int
@@ -93,6 +92,7 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
+bool ice_is_pf_c827(struct ice_hw *hw);
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
@@ -186,6 +186,10 @@ int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
+int
+ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
+ u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
+ struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 3eb01731e496..e1fbc6de452d 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -70,6 +70,11 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -170,6 +175,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
if (mode == pf->dcbx_cap)
return ICE_DCB_NO_HW_CHG;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return ICE_DCB_NO_HW_CHG;
+ }
+
qos_cfg = &pf->hw.port_info->qos_cfg;
/* DSCP configuration is not DCBx negotiated */
@@ -261,6 +271,11 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
mutex_lock(&pf->tc_mutex);
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
@@ -323,6 +338,11 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
if (prio >= ICE_MAX_USER_PRIORITY)
return;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
@@ -379,6 +399,11 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return ICE_DCB_NO_HW_CHG;
+ }
+
/* Nothing to do */
if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return ICE_DCB_NO_HW_CHG;
@@ -451,6 +476,11 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
/* prio_type, bwg_id and bw_pct per UP are not supported */
@@ -505,6 +535,11 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
if (pgid >= ICE_MAX_TRAFFIC_CLASS)
return;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
@@ -725,6 +760,11 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
}
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
max_tc = pf->hw.func_caps.common_cap.maxtc;
if (app->priority >= max_tc) {
netdev_err(netdev, "TC %d out of range, max TC %d\n",
@@ -836,6 +876,11 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
}
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
mutex_lock(&pf->tc_mutex);
old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -937,6 +982,11 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return ICE_DCB_NO_HW_CHG;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index d71ed210f9c4..b27ec93638b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -30,7 +30,7 @@ static const struct ice_tunnel_type_scan tnls[] = {
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
-enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
@@ -118,7 +118,7 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
*
* This helper function validates a buffer's header.
*/
-struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
struct ice_buf_hdr *hdr;
u16 section_count;
@@ -1153,6 +1153,54 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
}
/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static int
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
@@ -1294,20 +1342,20 @@ static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
}
/**
- * ice_aq_download_pkg
+ * ice_aq_update_pkg
* @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer to transfer
- * @buf_size: the size of the package buffer
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
* @last_buf: last buffer indicator
* @error_offset: returns error offset
* @error_info: returns error information
* @cd: pointer to command details structure or NULL
*
- * Download Package (0x0C40)
+ * Update Package (0x0C42)
*/
-int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
+static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
@@ -1319,7 +1367,7 @@ int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*error_info = 0;
cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
if (last_buf)
@@ -1361,53 +1409,6 @@ int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
- * ice_aq_update_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package cmd buffer
- * @buf_size: the size of the package cmd buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Update Package (0x0C42)
- */
-static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == -EIO) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
* ice_update_pkg_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
@@ -1470,8 +1471,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* success it returns a pointer to the segment header, otherwise it will
* return NULL.
*/
-struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
+static struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
{
u32 i;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 41acfe26df1c..abb5f32f2ef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -416,21 +416,13 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
-int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd);
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
-enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
-
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
-struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr);
-
int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
@@ -439,6 +431,4 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
-struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf);
-
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 8f232c41a89e..a655d499abfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
+#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
@@ -83,10 +84,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
- vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
- if (vlan_ops->dis_stripping(ctrl_vsi))
- return -ENODEV;
-
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
netif_addr_lock_bh(uplink_netdev);
@@ -103,17 +100,28 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
rule_added = true;
}
+ vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+ if (vlan_ops->dis_rx_filtering(uplink_vsi))
+ goto err_dis_rx;
+
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_control;
+ if (ice_vsi_update_local_lb(uplink_vsi, true))
+ goto err_override_local_lb;
+
return 0;
+err_override_local_lb:
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
+ vlan_ops->ena_rx_filtering(uplink_vsi);
+err_dis_rx:
if (rule_added)
ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
@@ -306,6 +314,9 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
+ if (repr->br_port)
+ repr->br_port->vsi = vsi;
+
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
@@ -331,6 +342,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
+ if (!vsi || !ice_is_switchdev_running(vsi->back))
+ return NETDEV_TX_BUSY;
+
if (ice_is_reset_in_progress(vsi->back->state) ||
test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
@@ -378,9 +392,14 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+
+ ice_vsi_update_local_lb(uplink_vsi, false);
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ vlan_ops->ena_rx_filtering(uplink_vsi);
ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -455,16 +474,24 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf)
*/
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi;
+ struct ice_vsi *ctrl_vsi, *uplink_vsi;
+
+ uplink_vsi = ice_get_main_vsi(pf);
+ if (!uplink_vsi)
+ return -ENODEV;
+
+ if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Uplink port cannot be a bridge port\n");
+ return -EINVAL;
+ }
pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
if (!pf->switchdev.control_vsi)
return -ENODEV;
ctrl_vsi = pf->switchdev.control_vsi;
- pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
- if (!pf->switchdev.uplink_vsi)
- goto err_vsi;
+ pf->switchdev.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
goto err_vsi;
@@ -480,10 +507,15 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
if (ice_vsi_open(ctrl_vsi))
goto err_setup_reprs;
+ if (ice_eswitch_br_offloads_init(pf))
+ goto err_br_offloads;
+
ice_eswitch_napi_enable(pf);
return 0;
+err_br_offloads:
+ ice_vsi_close(ctrl_vsi);
err_setup_reprs:
ice_repr_rem_from_all_vfs(pf);
err_repr_add:
@@ -502,8 +534,8 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
ice_eswitch_napi_disable(pf);
+ ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
ice_repr_rem_from_all_vfs(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
new file mode 100644
index 000000000000..67bfd1f61cdd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -0,0 +1,1346 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_eswitch_br.h"
+#include "ice_repr.h"
+#include "ice_switch.h"
+#include "ice_vlan.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_trace.h"
+
+#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)
+
+static const struct rhashtable_params ice_fdb_ht_params = {
+ .key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
+ .key_len = sizeof(struct ice_esw_br_fdb_data),
+ .head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
+ .automatic_shrinking = true,
+};
+
+static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
+{
+ /* Accept only PF netdev, PRs and LAG */
+ return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
+ netif_is_lag_master(dev);
+}
+
+static struct net_device *
+ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(lag_dev, lower, iter) {
+ if (netif_is_ice(lower))
+ return lower;
+ }
+
+ return NULL;
+}
+
+static struct ice_esw_br_port *
+ice_eswitch_br_netdev_to_port(struct net_device *dev)
+{
+ if (ice_is_port_repr_netdev(dev)) {
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+
+ return repr->br_port;
+ } else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
+ struct net_device *ice_dev;
+ struct ice_pf *pf;
+
+ if (netif_is_lag_master(dev))
+ ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
+ else
+ ice_dev = dev;
+
+ if (!ice_dev)
+ return NULL;
+
+ pf = ice_netdev_to_pf(ice_dev);
+
+ return pf->br_port;
+ }
+
+ return NULL;
+}
+
+static void
+ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
+ u8 pf_id, u16 vf_vsi_idx)
+{
+ rule_info->sw_act.vsi_handle = vf_vsi_idx;
+ rule_info->sw_act.flag |= ICE_FLTR_RX;
+ rule_info->sw_act.src = pf_id;
+ rule_info->priority = 5;
+}
+
+static void
+ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
+ u16 pf_vsi_idx)
+{
+ rule_info->sw_act.vsi_handle = pf_vsi_idx;
+ rule_info->sw_act.flag |= ICE_FLTR_TX;
+ rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ rule_info->flags_info.act_valid = true;
+ rule_info->priority = 5;
+}
+
+static int
+ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
+{
+ int err;
+
+ if (!rule)
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(hw, rule);
+ kfree(rule);
+
+ return err;
+}
+
+static u16
+ice_eswitch_br_get_lkups_cnt(u16 vid)
+{
+ return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
+}
+
+static void
+ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
+{
+ if (ice_eswitch_br_is_vid_valid(vid)) {
+ list[1].type = ICE_VLAN_OFOS;
+ list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
+ list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ }
+}
+
+static struct ice_rule_query_data *
+ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data *rule;
+ struct ice_adv_lkup_elem *list;
+ u16 lkups_cnt;
+ int err;
+
+ lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list) {
+ err = -ENOMEM;
+ goto err_list_alloc;
+ }
+
+ switch (port_type) {
+ case ICE_ESWITCH_BR_UPLINK_PORT:
+ ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
+ break;
+ case ICE_ESWITCH_BR_VF_REPR_PORT:
+ ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
+ vsi_idx);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_add_rule;
+ }
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
+
+ ice_eswitch_br_add_vlan_lkup(list, vid);
+
+ rule_info.need_pass_l2 = true;
+
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
+ if (err)
+ goto err_add_rule;
+
+ kfree(list);
+
+ return rule;
+
+err_add_rule:
+ kfree(list);
+err_list_alloc:
+ kfree(rule);
+
+ return ERR_PTR(err);
+}
+
+static struct ice_rule_query_data *
+ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data *rule;
+ struct ice_adv_lkup_elem *list;
+ int err = -ENOMEM;
+ u16 lkups_cnt;
+
+ lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ goto err_exit;
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ goto err_list_alloc;
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
+
+ ice_eswitch_br_add_vlan_lkup(list, vid);
+
+ rule_info.allow_pass_l2 = true;
+ rule_info.sw_act.vsi_handle = vsi_idx;
+ rule_info.sw_act.fltr_act = ICE_NOP;
+ rule_info.priority = 5;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
+ if (err)
+ goto err_add_rule;
+
+ kfree(list);
+
+ return rule;
+
+err_add_rule:
+ kfree(list);
+err_list_alloc:
+ kfree(rule);
+err_exit:
+ return ERR_PTR(err);
+}
+
+static struct ice_esw_br_flow *
+ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
+ int port_type, const unsigned char *mac, u16 vid)
+{
+ struct ice_rule_query_data *fwd_rule, *guard_rule;
+ struct ice_esw_br_flow *flow;
+ int err;
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
+ vid);
+ err = PTR_ERR_OR_ZERO(fwd_rule);
+ if (err) {
+ dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
+ port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
+ err);
+ goto err_fwd_rule;
+ }
+
+ guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
+ err = PTR_ERR_OR_ZERO(guard_rule);
+ if (err) {
+ dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
+ port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
+ err);
+ goto err_guard_rule;
+ }
+
+ flow->fwd_rule = fwd_rule;
+ flow->guard_rule = guard_rule;
+
+ return flow;
+
+err_guard_rule:
+ ice_eswitch_br_rule_delete(hw, fwd_rule);
+err_fwd_rule:
+ kfree(flow);
+
+ return ERR_PTR(err);
+}
+
+static struct ice_esw_br_fdb_entry *
+ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
+ u16 vid)
+{
+ struct ice_esw_br_fdb_data data = {
+ .vid = vid,
+ };
+
+ ether_addr_copy(data.addr, mac);
+ return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
+ ice_fdb_ht_params);
+}
+
+static void
+ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
+ if (err)
+ dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
+ err);
+
+ err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
+ if (err)
+ dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
+ err);
+
+ kfree(flow);
+}
+
+static struct ice_esw_br_vlan *
+ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port) {
+ dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan) {
+ dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
+ vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vlan;
+}
+
+static void
+ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
+ struct ice_esw_br_fdb_entry *fdb_entry)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+
+ rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
+ ice_fdb_ht_params);
+ list_del(&fdb_entry->list);
+
+ ice_eswitch_br_flow_delete(pf, fdb_entry->flow);
+
+ kfree(fdb_entry);
+}
+
+static void
+ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
+ const unsigned char *mac, u16 vid,
+ unsigned long val)
+{
+ struct switchdev_notifier_fdb_info fdb_info = {
+ .addr = mac,
+ .vid = vid,
+ .offloaded = true,
+ };
+
+ call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
+}
+
+static void
+ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
+ struct ice_esw_br_fdb_entry *entry)
+{
+ if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
+ ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
+ entry->data.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ ice_eswitch_br_fdb_entry_delete(bridge, entry);
+}
+
+static void
+ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct ice_esw_br_fdb_entry *fdb_entry;
+ struct device *dev = ice_pf_to_dev(pf);
+
+ fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
+ if (!fdb_entry) {
+ dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
+ mac, vid);
+ return;
+ }
+
+ trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
+}
+
+static void
+ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
+ struct ice_esw_br_port *br_port,
+ bool added_by_user,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_esw_br *bridge = br_port->bridge;
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_esw_br_fdb_entry *fdb_entry;
+ struct ice_esw_br_flow *flow;
+ struct ice_esw_br_vlan *vlan;
+ struct ice_hw *hw = &pf->hw;
+ unsigned long event;
+ int err;
+
+ /* untagged filtering is not yet supported */
+ if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
+ return;
+
+ if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
+ vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
+ vid);
+ if (IS_ERR(vlan)) {
+ dev_err(dev, "Failed to find vlan lookup, err: %ld\n",
+ PTR_ERR(vlan));
+ return;
+ }
+ }
+
+ fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
+ if (fdb_entry)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
+ br_port->type, mac, vid);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto err_add_flow;
+ }
+
+ ether_addr_copy(fdb_entry->data.addr, mac);
+ fdb_entry->data.vid = vid;
+ fdb_entry->br_port = br_port;
+ fdb_entry->flow = flow;
+ fdb_entry->dev = netdev;
+ fdb_entry->last_use = jiffies;
+ event = SWITCHDEV_FDB_ADD_TO_BRIDGE;
+
+ if (added_by_user) {
+ fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
+ event = SWITCHDEV_FDB_OFFLOADED;
+ }
+
+ err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
+ ice_fdb_ht_params);
+ if (err)
+ goto err_fdb_insert;
+
+ list_add(&fdb_entry->list, &bridge->fdb_list);
+ trace_ice_eswitch_br_fdb_entry_create(fdb_entry);
+
+ ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);
+
+ return;
+
+err_fdb_insert:
+ ice_eswitch_br_flow_delete(pf, flow);
+err_add_flow:
+ kfree(fdb_entry);
+err_exit:
+ dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
+}
+
+static void
+ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
+{
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+}
+
+static void
+ice_eswitch_br_fdb_event_work(struct work_struct *work)
+{
+ struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
+ bool added_by_user = fdb_work->fdb_info.added_by_user;
+ const unsigned char *mac = fdb_work->fdb_info.addr;
+ u16 vid = fdb_work->fdb_info.vid;
+ struct ice_esw_br_port *br_port;
+
+ rtnl_lock();
+
+ br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
+ if (!br_port)
+ goto err_exit;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
+ added_by_user, mac, vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
+ mac, vid);
+ break;
+ default:
+ goto err_exit;
+ }
+
+err_exit:
+ rtnl_unlock();
+ dev_put(fdb_work->dev);
+ ice_eswitch_br_fdb_work_dealloc(fdb_work);
+}
+
+static struct ice_esw_br_fdb_work *
+ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
+ struct net_device *dev,
+ unsigned long event)
+{
+ struct ice_esw_br_fdb_work *work;
+ unsigned char *mac;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
+ memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
+
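+ /* fdb_info->addr points to caller-owned memory - copy the MAC so it
+ * remains valid while the work item is deferred.
+ */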
+ mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!mac) {
+ kfree(work);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ether_addr_copy(mac, fdb_info->addr);
+ work->fdb_info.addr = mac;
+ work->event = event;
+ work->dev = dev;
+
+ return work;
+}
+
+static int
+ice_eswitch_br_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct ice_esw_br_offloads *br_offloads;
+ struct ice_esw_br_fdb_work *work;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
+ extack = switchdev_notifier_info_to_extack(ptr);
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
+ if (!ice_eswitch_br_is_dev_valid(dev))
+ return NOTIFY_DONE;
+
+ if (!ice_eswitch_br_netdev_to_port(dev))
+ return NOTIFY_DONE;
+
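+ /* Notifier runs in atomic context - defer FDB processing to the
+ * ordered workqueue.
+ */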
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info, typeof(*fdb_info), info);
+
+ work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
+ if (IS_ERR(work)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
+ return notifier_from_errno(PTR_ERR(work));
+ }
+ dev_hold(dev);
+
+ queue_work(br_offloads->wq, &work->work);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+}
+
+static void
+ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
+{
+ if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
+ return;
+
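+ /* Toggling VLAN filtering invalidates the offloaded FDB entries,
+ * so flush them.
+ */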
+ ice_eswitch_br_fdb_flush(bridge);
+ if (enable)
+ bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
+ else
+ bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
+}
+
+static void
+ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
+{
+ struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
+
+ vlan_ops->del_vlan(port->vsi, &port_vlan);
+ vlan_ops->clear_port_vlan(port->vsi);
+
+ ice_vf_vsi_disable_port_vlan(port->vsi);
+
+ port->pvid = 0;
+}
+
+static void
+ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
+ struct ice_esw_br_vlan *vlan)
+{
+ struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
+ struct ice_esw_br *bridge = port->bridge;
+
+ trace_ice_eswitch_br_vlan_cleanup(vlan);
+
+ list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
+ if (vlan->vid == fdb_entry->data.vid)
+ ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
+ }
+
+ xa_erase(&port->vlans, vlan->vid);
+ if (port->pvid == vlan->vid)
+ ice_eswitch_br_clear_pvid(port);
+ kfree(vlan);
+}
+
+static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
+{
+ struct ice_esw_br_vlan *vlan;
+ unsigned long index;
+
+ xa_for_each(&port->vlans, index, vlan)
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
+static int
+ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
+ struct ice_esw_br_vlan *vlan)
+{
+ struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
+ struct device *dev = ice_pf_to_dev(port->vsi->back);
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ if (port->pvid == vlan->vid || vlan->vid == 1)
+ return 0;
+
+ /* Setting port vlan on uplink isn't supported by hw */
+ if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
+ return -EOPNOTSUPP;
+
+ if (port->pvid) {
+ dev_info(dev,
+ "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
+ port->vsi_idx, port->pvid);
+ return -EEXIST;
+ }
+
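+ /* Switch the VF VSI into port VLAN mode before programming the port VLAN */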
+ ice_vf_vsi_enable_port_vlan(port->vsi);
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
+ err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
+ if (err)
+ return err;
+
+ err = vlan_ops->add_vlan(port->vsi, &port_vlan);
+ if (err)
+ return err;
+
+ ice_eswitch_br_port_vlans_flush(port);
+ port->pvid = vlan->vid;
+
+ return 0;
+}
+
+static struct ice_esw_br_vlan *
+ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
+{
+ struct device *dev = ice_pf_to_dev(port->vsi->back);
+ struct ice_esw_br_vlan *vlan;
+ int err;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ if ((flags & BRIDGE_VLAN_INFO_PVID) &&
+ (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
+ err = ice_eswitch_br_set_pvid(port, vlan);
+ if (err)
+ goto err_set_pvid;
+ } else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
+ (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
+ dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
+ err = -EOPNOTSUPP;
+ goto err_set_pvid;
+ }
+
+ err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
+ if (err)
+ goto err_insert;
+
+ trace_ice_eswitch_br_vlan_create(vlan);
+
+ return vlan;
+
+err_insert:
+ if (port->pvid)
+ ice_eswitch_br_clear_pvid(port);
+err_set_pvid:
+ kfree(vlan);
+ return ERR_PTR(err);
+}
+
+static int
+ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return -EINVAL;
+
+ if (port->pvid) {
+ dev_info(ice_pf_to_dev(port->vsi->back),
+ "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
+ port->vsi_idx, port->pvid);
+ return -EEXIST;
+ }
+
+ vlan = xa_load(&port->vlans, vid);
+ if (vlan) {
+ if (vlan->flags == flags)
+ return 0;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+ }
+
+ vlan = ice_eswitch_br_vlan_create(vid, flags, port);
+ if (IS_ERR(vlan)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
+ vid, vsi_idx);
+ return PTR_ERR(vlan);
+ }
+
+ return 0;
+}
+
+static void
+ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return;
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan)
+ return;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
+static int
+ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+ int err;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ err = ice_eswitch_br_port_vlan_add(br_port->bridge,
+ br_port->vsi_idx, vlan->vid,
+ vlan->flags, extack);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
+ vlan->vid);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ice_eswitch_br_vlan_filtering_set(br_port->bridge,
+ attr->u.vlan_filtering);
+ return 0;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ br_port->bridge->ageing_time =
+ clock_t_to_jiffies(attr->u.ageing_time);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_attr_set);
+ break;
+ default:
+ err = 0;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void
+ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
+ struct ice_esw_br_port *br_port)
+{
+ struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
+ struct ice_vsi *vsi = br_port->vsi;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
+ if (br_port == fdb_entry->br_port)
+ ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
+ }
+
+ if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+ vsi->back->br_port = NULL;
+ else if (vsi->vf && vsi->vf->repr)
+ vsi->vf->repr->br_port = NULL;
+
+ xa_erase(&bridge->ports, br_port->vsi_idx);
+ ice_eswitch_br_port_vlans_flush(br_port);
+ kfree(br_port);
+}
+
+static struct ice_esw_br_port *
+ice_eswitch_br_port_init(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ xa_init(&br_port->vlans);
+
+ br_port->bridge = bridge;
+
+ return br_port;
+}
+
+static int
+ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
+ struct ice_repr *repr)
+{
+ struct ice_esw_br_port *br_port;
+ int err;
+
+ br_port = ice_eswitch_br_port_init(bridge);
+ if (IS_ERR(br_port))
+ return PTR_ERR(br_port);
+
+ br_port->vsi = repr->src_vsi;
+ br_port->vsi_idx = br_port->vsi->idx;
+ br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+ repr->br_port = br_port;
+
+ err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
+ if (err) {
+ ice_eswitch_br_port_deinit(bridge, br_port);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+ struct ice_esw_br_port *br_port;
+ int err;
+
+ br_port = ice_eswitch_br_port_init(bridge);
+ if (IS_ERR(br_port))
+ return PTR_ERR(br_port);
+
+ br_port->vsi = vsi;
+ br_port->vsi_idx = br_port->vsi->idx;
+ br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
+ pf->br_port = br_port;
+
+ err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
+ if (err) {
+ ice_eswitch_br_port_deinit(bridge, br_port);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_port *port;
+ unsigned long i;
+
+ xa_for_each(&bridge->ports, i, port)
+ ice_eswitch_br_port_deinit(bridge, port);
+}
+
+static void
+ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
+ struct ice_esw_br *bridge)
+{
+ if (!bridge)
+ return;
+
+ /* Clean up all the ports that were added asynchronously
+ * through the NETDEV_CHANGEUPPER event.
+ */
+ ice_eswitch_br_ports_flush(bridge);
+ WARN_ON(!xa_empty(&bridge->ports));
+ xa_destroy(&bridge->ports);
+ rhashtable_destroy(&bridge->fdb_ht);
+
+ br_offloads->bridge = NULL;
+ kfree(bridge);
+}
+
+static struct ice_esw_br *
+ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
+{
+ struct ice_esw_br *bridge;
+ int err;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ INIT_LIST_HEAD(&bridge->fdb_list);
+ bridge->br_offloads = br_offloads;
+ bridge->ifindex = ifindex;
+ bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
+ xa_init(&bridge->ports);
+ br_offloads->bridge = bridge;
+
+ return bridge;
+}
+
+static struct ice_esw_br *
+ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br *bridge = br_offloads->bridge;
+
+ if (bridge) {
+ if (bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one bridge is supported per eswitch");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ return bridge;
+ }
+
+ /* Create the bridge if it doesn't exist yet */
+ bridge = ice_eswitch_br_init(br_offloads, ifindex);
+ if (IS_ERR(bridge))
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");
+
+ return bridge;
+}
+
+static void
+ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
+ struct ice_esw_br *bridge)
+{
+ /* Remove the bridge if it exists and there are no ports left */
+ if (!bridge || !xa_empty(&bridge->ports))
+ return;
+
+ ice_eswitch_br_deinit(br_offloads, bridge);
+}
+
+static int
+ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
+ struct net_device *dev, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
+ struct ice_esw_br *bridge;
+
+ if (!br_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port representor is not attached to any bridge");
+ return -EINVAL;
+ }
+
+ if (br_port->bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port representor is attached to another bridge");
+ return -EINVAL;
+ }
+
+ bridge = br_port->bridge;
+
+ trace_ice_eswitch_br_port_unlink(br_port);
+ ice_eswitch_br_port_deinit(br_port->bridge, br_port);
+ ice_eswitch_br_verify_deinit(br_offloads, bridge);
+
+ return 0;
+}
+
+static int
+ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
+ struct net_device *dev, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br *bridge;
+ int err;
+
+ if (ice_eswitch_br_netdev_to_port(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port is already attached to the bridge");
+ return -EINVAL;
+ }
+
+ bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ if (ice_is_port_repr_netdev(dev)) {
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+
+ err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
+ trace_ice_eswitch_br_port_link(repr->br_port);
+ } else {
+ struct net_device *ice_dev;
+ struct ice_pf *pf;
+
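+ /* For a bond master, resolve the underlying ice uplink netdev */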
+ if (netif_is_lag_master(dev))
+ ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
+ else
+ ice_dev = dev;
+
+ if (!ice_dev)
+ return 0;
+
+ pf = ice_netdev_to_pf(ice_dev);
+
+ err = ice_eswitch_br_uplink_port_init(bridge, pf);
+ trace_ice_eswitch_br_port_link(pf->br_port);
+ }
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
+ goto err_port_init;
+ }
+
+ return 0;
+
+err_port_init:
+ ice_eswitch_br_verify_deinit(br_offloads, bridge);
+ return err;
+}
+
+static int
+ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct ice_esw_br_offloads *br_offloads;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);
+
+ if (!ice_eswitch_br_is_dev_valid(dev))
+ return 0;
+
+ upper = info->upper_dev;
+ if (!netif_is_bridge_master(upper))
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (info->linking)
+ return ice_eswitch_br_port_link(br_offloads, dev,
+ upper->ifindex, extack);
+ else
+ return ice_eswitch_br_port_unlink(br_offloads, dev,
+ upper->ifindex, extack);
+}
+
+static int
+ice_eswitch_br_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ err = ice_eswitch_br_port_changeupper(nb, ptr);
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void
+ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+
+ ASSERT_RTNL();
+
+ if (!br_offloads)
+ return;
+
+ ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
+
+ pf->switchdev.br_offloads = NULL;
+ kfree(br_offloads);
+}
+
+static struct ice_esw_br_offloads *
+ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ ASSERT_RTNL();
+
+ if (pf->switchdev.br_offloads)
+ return ERR_PTR(-EEXIST);
+
+ br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
+ if (!br_offloads)
+ return ERR_PTR(-ENOMEM);
+
+ pf->switchdev.br_offloads = br_offloads;
+ br_offloads->pf = pf;
+
+ return br_offloads;
+}
+
+void
+ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ br_offloads = pf->switchdev.br_offloads;
+ if (!br_offloads)
+ return;
+
+ cancel_delayed_work_sync(&br_offloads->update_work);
+ unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+ unregister_switchdev_notifier(&br_offloads->switchdev_nb);
+ destroy_workqueue(br_offloads->wq);
+ /* Although the notifier blocks were just unregistered, so no new
+ * events will arrive, some events might already be in progress.
+ * Hold the rtnl lock and wait for them to finish.
+ */
+ rtnl_lock();
+ ice_eswitch_br_offloads_dealloc(pf);
+ rtnl_unlock();
+}
+
+static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
+{
+ struct ice_esw_br *bridge = br_offloads->bridge;
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
+ if (!bridge)
+ return;
+
+ rtnl_lock();
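+ /* Age out dynamically learned entries whose last use exceeds the
+ * bridge ageing time; user-added entries are kept.
+ */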
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
+ if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
+ continue;
+
+ if (time_is_after_eq_jiffies(entry->last_use +
+ bridge->ageing_time))
+ continue;
+
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+ }
+ rtnl_unlock();
+}
+
+static void ice_eswitch_br_update_work(struct work_struct *work)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ br_offloads = ice_work_to_br_offloads(work);
+
+ ice_eswitch_br_update(br_offloads);
+
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ ICE_ESW_BRIDGE_UPDATE_INTERVAL);
+}
+
+int
+ice_eswitch_br_offloads_init(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ rtnl_lock();
+ br_offloads = ice_eswitch_br_offloads_alloc(pf);
+ rtnl_unlock();
+ if (IS_ERR(br_offloads)) {
+ dev_err(dev, "Failed to init eswitch bridge\n");
+ return PTR_ERR(br_offloads);
+ }
+
+ br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
+ if (!br_offloads->wq) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to allocate bridge workqueue\n");
+ goto err_alloc_wq;
+ }
+
+ br_offloads->switchdev_nb.notifier_call =
+ ice_eswitch_br_switchdev_event;
+ err = register_switchdev_notifier(&br_offloads->switchdev_nb);
+ if (err) {
+ dev_err(dev,
+ "Failed to register switchdev notifier\n");
+ goto err_reg_switchdev_nb;
+ }
+
+ br_offloads->switchdev_blk.notifier_call =
+ ice_eswitch_br_event_blocking;
+ err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+ if (err) {
+ dev_err(dev,
+ "Failed to register bridge blocking switchdev notifier\n");
+ goto err_reg_switchdev_blk;
+ }
+
+ br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
+ err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ if (err) {
+ dev_err(dev,
+ "Failed to register bridge port event notifier\n");
+ goto err_reg_netdev_nb;
+ }
+
+ INIT_DELAYED_WORK(&br_offloads->update_work,
+ ice_eswitch_br_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ ICE_ESW_BRIDGE_UPDATE_INTERVAL);
+
+ return 0;
+
+err_reg_netdev_nb:
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+err_reg_switchdev_blk:
+ unregister_switchdev_notifier(&br_offloads->switchdev_nb);
+err_reg_switchdev_nb:
+ destroy_workqueue(br_offloads->wq);
+err_alloc_wq:
+ rtnl_lock();
+ ice_eswitch_br_offloads_dealloc(pf);
+ rtnl_unlock();
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
new file mode 100644
index 000000000000..85a8fadb2928
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_ESWITCH_BR_H_
+#define _ICE_ESWITCH_BR_H_
+
+#include <linux/rhashtable.h>
+#include <linux/workqueue.h>
+
+struct ice_esw_br_fdb_data {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct ice_esw_br_flow {
+ struct ice_rule_query_data *fwd_rule;
+ struct ice_rule_query_data *guard_rule;
+};
+
+enum {
+ ICE_ESWITCH_BR_FDB_ADDED_BY_USER = BIT(0),
+};
+
+struct ice_esw_br_fdb_entry {
+ struct ice_esw_br_fdb_data data;
+ struct rhash_head ht_node;
+ struct list_head list;
+
+ int flags;
+
+ struct net_device *dev;
+ struct ice_esw_br_port *br_port;
+ struct ice_esw_br_flow *flow;
+
+ unsigned long last_use;
+};
+
+enum ice_esw_br_port_type {
+ ICE_ESWITCH_BR_UPLINK_PORT = 0,
+ ICE_ESWITCH_BR_VF_REPR_PORT = 1,
+};
+
+struct ice_esw_br_port {
+ struct ice_esw_br *bridge;
+ struct ice_vsi *vsi;
+ enum ice_esw_br_port_type type;
+ u16 vsi_idx;
+ u16 pvid;
+ struct xarray vlans;
+};
+
+enum {
+ ICE_ESWITCH_BR_VLAN_FILTERING = BIT(0),
+};
+
+struct ice_esw_br {
+ struct ice_esw_br_offloads *br_offloads;
+ struct xarray ports;
+
+ struct rhashtable fdb_ht;
+ struct list_head fdb_list;
+
+ int ifindex;
+ u32 flags;
+ unsigned long ageing_time;
+};
+
+struct ice_esw_br_offloads {
+ struct ice_pf *pf;
+ struct ice_esw_br *bridge;
+ struct notifier_block netdev_nb;
+ struct notifier_block switchdev_blk;
+ struct notifier_block switchdev_nb;
+
+ struct workqueue_struct *wq;
+ struct delayed_work update_work;
+};
+
+struct ice_esw_br_fdb_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct ice_esw_br_vlan {
+ u16 vid;
+ u16 flags;
+};
+
+#define ice_nb_to_br_offloads(nb, nb_name) \
+ container_of(nb, \
+ struct ice_esw_br_offloads, \
+ nb_name)
+
+#define ice_work_to_br_offloads(w) \
+ container_of(w, \
+ struct ice_esw_br_offloads, \
+ update_work.work)
+
+#define ice_work_to_fdb_work(w) \
+ container_of(w, \
+ struct ice_esw_br_fdb_work, \
+ work)
+
+static inline bool ice_eswitch_br_is_vid_valid(u16 vid)
+{
+ /* In trunk VLAN mode, for untagged traffic the bridge sends requests
+ * to offload VLAN 1 with pvid and untagged flags set. Since these
+ * flags are not supported, add a MAC filter instead.
+ */
+ return vid > 1;
+}
+
+void
+ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
+int
+ice_eswitch_br_offloads_init(struct ice_pf *pf);
+
+#endif /* _ICE_ESWITCH_BR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 3dc5662d62a6..319a2d6fe26c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -293,16 +293,17 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
- struct ice_rq_event_info event;
+ struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
+ struct ice_aq_desc *desc;
u32 completion_offset;
int err;
- memset(&event, 0, sizeof(event));
-
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
block_size, module, offset);
+ ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);
+
err = ice_aq_update_nvm(hw, module, offset, block_size, block,
last_cmd, 0, NULL);
if (err) {
@@ -319,7 +320,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
* is conservative and is intended to prevent failure to update when
* firmware is slow to respond.
*/
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
+ err = ice_aq_wait_for_event(pf, &task, 15 * HZ);
if (err) {
dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
module, block_size, offset, err);
@@ -327,11 +328,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
return -EIO;
}
- completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
- completion_retval = le16_to_cpu(event.desc.retval);
+ desc = &task.event.desc;
+ completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(desc->retval);
- completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low);
- completion_offset |= event.desc.params.nvm.offset_high << 16;
+ completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
+ completion_offset |= desc->params.nvm.offset_high << 16;
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
@@ -363,8 +365,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
- *reset_level = (event.desc.params.nvm.cmd_flags &
- ICE_AQC_NVM_RESET_LVL_M);
+ *reset_level = desc->params.nvm.cmd_flags &
+ ICE_AQC_NVM_RESET_LVL_M;
dev_dbg(dev, "Firmware reported required reset level as %u\n",
*reset_level);
} else {
@@ -479,19 +481,20 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
- struct ice_rq_event_info event;
+ struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
+ struct ice_aq_desc *desc;
struct devlink *devlink;
int err;
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
- memset(&event, 0, sizeof(event));
-
devlink = priv_to_devlink(pf);
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
+ ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_erase);
+
err = ice_aq_erase_nvm(hw, module, NULL);
if (err) {
dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
@@ -502,7 +505,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event);
+ err = ice_aq_wait_for_event(pf, &task, ICE_FW_ERASE_TIMEOUT * HZ);
if (err) {
dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
component, module, err);
@@ -510,8 +513,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
- completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
- completion_retval = le16_to_cpu(event.desc.retval);
+ desc = &task.event.desc;
+ completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(desc->retval);
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n",
@@ -560,13 +564,13 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
u8 *emp_reset_available, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
- struct ice_rq_event_info event;
+ struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
u16 completion_retval;
u8 response_flags;
int err;
- memset(&event, 0, sizeof(event));
+ ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);
err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
if (err) {
@@ -592,8 +596,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
}
}
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ,
- &event);
+ err = ice_aq_wait_for_event(pf, &task, 30 * HZ);
if (err) {
dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
err);
@@ -601,7 +604,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
return err;
}
- completion_retval = le16_to_cpu(event.desc.retval);
+ completion_retval = le16_to_cpu(task.event.desc.retval);
if (completion_retval) {
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
ice_aq_str((enum ice_aq_err)completion_retval));
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a92dc9a16035..531cc2194741 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -335,6 +335,8 @@
#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
#define VP_MDET_TX_TDPU_VALID_M BIT(0)
+#define GL_MNG_FWSM 0x000B6134
+#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_M BIT(6)
#define GLNVM_GENS 0x000B6100
@@ -489,7 +491,6 @@
#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16)
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4))
#define VSIQF_HKEY_MAX_INDEX 12
-#define VSIQF_HLUT_MAX_INDEX 15
#define PFPM_APM 0x000B8080
#define PFPM_APM_APME_M BIT(0)
#define PFPM_WUFC 0x0009DC00
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 5a7753bda324..4f39863b5537 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -4,8 +4,24 @@
/* Link Aggregation code */
#include "ice.h"
+#include "ice_lib.h"
#include "ice_lag.h"
+#define ICE_LAG_RES_SHARED BIT(14)
+#define ICE_LAG_RES_VALID BIT(15)
+
+#define LACP_TRAIN_PKT_LEN 16
+static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x88, 0x09, 0, 0 };
+
+#define ICE_RECIPE_LEN 64
+static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
+ 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
/**
* ice_lag_set_primary - set PF LAG state as Primary
* @lag: LAG info struct
@@ -47,16 +63,219 @@ static void ice_lag_set_backup(struct ice_lag *lag)
}
/**
+ * netif_is_same_ice - determine if netdev is on the same ice NIC as local PF
+ * @pf: local PF struct
+ * @netdev: netdev we are evaluating
+ */
+static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_pf *test_pf;
+ struct ice_vsi *vsi;
+
+ if (!netif_is_ice(netdev))
+ return false;
+
+ np = netdev_priv(netdev);
+ if (!np)
+ return false;
+
+ vsi = np->vsi;
+ if (!vsi)
+ return false;
+
+ test_pf = vsi->back;
+ if (!test_pf)
+ return false;
+
+ if (pf->pdev->bus != test_pf->pdev->bus ||
+ pf->pdev->slot != test_pf->pdev->slot)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_netdev_to_lag - return pointer to associated lag struct from netdev
+ * @netdev: pointer to net_device struct to query
+ */
+static struct ice_lag *ice_netdev_to_lag(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_vsi *vsi;
+
+ if (!netif_is_ice(netdev))
+ return NULL;
+
+ np = netdev_priv(netdev);
+ if (!np)
+ return NULL;
+
+ vsi = np->vsi;
+ if (!vsi)
+ return NULL;
+
+ return vsi->back->lag;
+}
+
+/**
+ * ice_lag_find_hw_by_lport - return the HW struct for a bond member's lport
+ * @lag: lag struct
+ * @lport: lport value to search for
+ */
+static struct ice_hw *
+ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
+{
+ struct ice_lag_netdev_list *entry;
+ struct net_device *tmp_netdev;
+ struct ice_netdev_priv *np;
+ struct ice_hw *hw;
+
+ list_for_each_entry(entry, lag->netdev_head, node) {
+ tmp_netdev = entry->netdev;
+ if (!tmp_netdev || !netif_is_ice(tmp_netdev))
+ continue;
+
+ np = netdev_priv(tmp_netdev);
+ if (!np || !np->vsi)
+ continue;
+
+ hw = &np->vsi->back->hw;
+ if (hw->port_info->lport == lport)
+ return hw;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_lag_find_primary - return a pointer to the primary interface's lag struct
+ * @lag: local interfaces lag struct
+ */
+static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
+{
+ struct ice_lag *primary_lag = NULL;
+ struct list_head *tmp;
+
+ list_for_each(tmp, lag->netdev_head) {
+ struct ice_lag_netdev_list *entry;
+ struct ice_lag *tmp_lag;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ tmp_lag = ice_netdev_to_lag(entry->netdev);
+ if (tmp_lag && tmp_lag->primary) {
+ primary_lag = tmp_lag;
+ break;
+ }
+ }
+
+ return primary_lag;
+}
+
+/**
+ * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG
+ * @lag: lag struct for local interface
+ * @add: boolean on whether we are adding filters
+ */
+static int
+ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ u16 s_rule_sz, vsi_num;
+ struct ice_hw *hw;
+ u32 act, opc;
+ u8 *eth_hdr;
+ int err;
+
+ hw = &lag->pf->hw;
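+ /* The default VSI rule forwards traffic to the PF's main VSI (SW index 0) */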
+ vsi_num = ice_get_hw_vsi_num(hw, 0);
+
+ s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
+ s_rule = kzalloc(s_rule_sz, GFP_KERNEL);
+ if (!s_rule) {
+ dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG default VSI\n");
+ return -ENOMEM;
+ }
+
+ if (add) {
+ eth_hdr = s_rule->hdr_data;
+ ice_fill_eth_hdr(eth_hdr);
+
+ act = (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
+ ICE_SINGLE_ACT_VSI_ID_M;
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
+
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->recipe_id = cpu_to_le16(lag->pf_recipe);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
+ s_rule->act = cpu_to_le32(act);
+ s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ s_rule->index = cpu_to_le16(lag->pf_rule_id);
+ opc = ice_aqc_opc_remove_sw_rules;
+ }
+
+ err = ice_aq_sw_rules(&lag->pf->hw, s_rule, s_rule_sz, 1, opc, NULL);
+ if (err)
+ goto dflt_fltr_free;
+
+ if (add)
+ lag->pf_rule_id = le16_to_cpu(s_rule->index);
+ else
+ lag->pf_rule_id = 0;
+
+dflt_fltr_free:
+ kfree(s_rule);
+ return err;
+}
+
+/**
+ * ice_lag_cfg_pf_fltrs - set filters up for new active port
+ * @lag: local interface's lag struct
+ * @ptr: opaque data containing notifier event
+ */
+static void
+ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
+ struct device *dev;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ /* not for this netdev */
+ if (event_netdev != lag->netdev)
+ return;
+
+ info = (struct netdev_notifier_bonding_info *)ptr;
+ bonding_info = &info->bonding_info;
+ dev = ice_pf_to_dev(lag->pf);
+
+ /* interface not active - remove old default VSI rule */
+ if (bonding_info->slave.state && lag->pf_rule_id) {
+ if (ice_lag_cfg_dflt_fltr(lag, false))
+ dev_err(dev, "Error removing old default VSI filter\n");
+ return;
+ }
+
+ /* interface becoming active - add new default VSI rule */
+ if (!bonding_info->slave.state && !lag->pf_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(dev, "Error adding new default VSI filter\n");
+}
+
+/**
* ice_display_lag_info - print LAG info
* @lag: LAG info struct
*/
static void ice_display_lag_info(struct ice_lag *lag)
{
- const char *name, *peer, *upper, *role, *bonded, *primary;
+ const char *name, *upper, *role, *bonded, *primary;
struct device *dev = &lag->pf->pdev->dev;
name = lag->netdev ? netdev_name(lag->netdev) : "unset";
- peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
primary = lag->primary ? "TRUE" : "FALSE";
bonded = lag->bonded ? "BONDED" : "UNBONDED";
@@ -78,8 +297,410 @@ static void ice_display_lag_info(struct ice_lag *lag)
role = "ERROR";
}
- dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
- bonded, peer, upper, role, primary);
+ dev_dbg(dev, "%s %s, upper:%s, role:%s, primary:%s\n", name, bonded,
+ upper, role, primary);
+}
+
+/**
+ * ice_lag_qbuf_recfg - generate a buffer of queues for a reconfigure command
+ * @hw: HW struct that contains the queue contexts
+ * @qbuf: pointer to buffer to populate
+ * @vsi_num: index of the VSI in PF space
+ * @numq: number of queues to search for
+ * @tc: traffic class that contains the queues
+ *
+ * Returns the number of valid queues added to the buffer.
+ */
+static u16
+ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
+ u16 vsi_num, u16 numq, u8 tc)
+{
+ struct ice_q_ctx *q_ctx;
+ u16 qid, count = 0;
+ struct ice_pf *pf;
+ int i;
+
+ pf = hw->back;
+ for (i = 0; i < numq; i++) {
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
+ if (!q_ctx) {
+ dev_dbg(ice_hw_to_dev(hw), "%s queue %d NO Q CONTEXT\n",
+ __func__, i);
+ continue;
+ }
+ if (q_ctx->q_teid == ICE_INVAL_TEID) {
+ dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL TEID\n",
+ __func__, i);
+ continue;
+ }
+ if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
+ dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL Q HANDLE\n",
+ __func__, i);
+ continue;
+ }
+
+ qid = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
+ qbuf->queue_info[count].q_handle = cpu_to_le16(qid);
+ qbuf->queue_info[count].tc = tc;
+ qbuf->queue_info[count].q_teid = cpu_to_le32(q_ctx->q_teid);
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_lag_get_sched_parent - locate or create a sched node parent
+ * @hw: HW struct for getting parent in
+ * @tc: traffic class on parent/node
+ */
+static struct ice_sched_node *
+ice_lag_get_sched_parent(struct ice_hw *hw, u8 tc)
+{
+ struct ice_sched_node *tc_node, *aggnode, *parent = NULL;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ struct ice_port_info *pi = hw->port_info;
+ struct device *dev;
+ u8 aggl, vsil;
+ int n;
+
+ dev = ice_hw_to_dev(hw);
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node) {
+ dev_warn(dev, "Failure to find TC node for LAG move\n");
+ return parent;
+ }
+
+ aggnode = ice_sched_get_agg_node(pi, tc_node, ICE_DFLT_AGG_ID);
+ if (!aggnode) {
+ dev_warn(dev, "Failure to find aggregate node for LAG move\n");
+ return parent;
+ }
+
+ aggl = ice_sched_get_agg_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ for (n = aggl + 1; n < vsil; n++)
+ num_nodes[n] = 1;
+
+ for (n = 0; n < aggnode->num_children; n++) {
+ parent = ice_sched_get_free_vsi_parent(hw, aggnode->children[n],
+ num_nodes);
+ if (parent)
+ return parent;
+ }
+
+ /* if free parent not found - add one */
+ parent = aggnode;
+ for (n = aggl + 1; n < vsil; n++) {
+ u16 num_nodes_added;
+ u32 first_teid;
+ int err;
+
+ err = ice_sched_add_nodes_to_layer(pi, tc_node, parent, n,
+ num_nodes[n], &first_teid,
+ &num_nodes_added);
+ if (err || num_nodes[n] != num_nodes_added)
+ return NULL;
+
+ if (num_nodes_added)
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_teid);
+ else
+ parent = parent->children[0];
+ if (!parent) {
+ dev_warn(dev, "Failure to add new parent for LAG move\n");
+ return parent;
+ }
+ }
+
+ return parent;
+}
+
+/**
+ * ice_lag_move_vf_node_tc - move scheduling nodes for one VF on one TC
+ * @lag: lag info struct
+ * @oldport: lport of previous nodes location
+ * @newport: lport of destination nodes location
+ * @vsi_num: array index of VSI in PF space
+ * @tc: traffic class to move
+ */
+static void
+ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
+ u16 vsi_num, u8 tc)
+{
+ u16 numq, valq, buf_size, num_moved, qbuf_size;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_aqc_move_elem *buf;
+ struct ice_sched_node *n_prt;
+ struct ice_hw *new_hw = NULL;
+ __le32 teid, parent_teid;
+ struct ice_vsi_ctx *ctx;
+ u32 tmp_teid;
+
+ ctx = ice_get_vsi_ctx(&lag->pf->hw, vsi_num);
+ if (!ctx) {
+ dev_warn(dev, "Unable to locate VSI context for LAG failover\n");
+ return;
+ }
+
+ /* check to see if this VF is enabled on this TC */
+ if (!ctx->sched.vsi_node[tc])
+ return;
+
+ /* locate HW struct for destination port */
+ new_hw = ice_lag_find_hw_by_lport(lag, newport);
+ if (!new_hw) {
+ dev_warn(dev, "Unable to locate HW struct for LAG node destination\n");
+ return;
+ }
+
+ numq = ctx->num_lan_q_entries[tc];
+ teid = ctx->sched.vsi_node[tc]->info.node_teid;
+ tmp_teid = le32_to_cpu(teid);
+ parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
+ /* if no teid assigned or numq == 0, then this TC is not active */
+ if (!tmp_teid || !numq)
+ return;
+
+ /* suspend VSI subtree for Traffic Class "tc" on
+ * this VF's VSI
+ */
+ if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, true))
+ dev_dbg(dev, "Problem suspending traffic for LAG node move\n");
+
+ /* reconfigure all VF's queues on this Traffic Class
+ * to new port
+ */
+ qbuf_size = struct_size(qbuf, queue_info, numq);
+ qbuf = kzalloc(qbuf_size, GFP_KERNEL);
+ if (!qbuf) {
+ dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
+ goto resume_traffic;
+ }
+
+ /* add the per queue info for the reconfigure command buffer */
+ valq = ice_lag_qbuf_recfg(&lag->pf->hw, qbuf, vsi_num, numq, tc);
+ if (!valq) {
+ dev_dbg(dev, "No valid queues found for LAG failover\n");
+ goto qbuf_none;
+ }
+
+ if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
+ newport, NULL)) {
+ dev_warn(dev, "Failure to configure queues for LAG failover\n");
+ goto qbuf_err;
+ }
+
+qbuf_none:
+ kfree(qbuf);
+
+ /* find new parent in destination port's tree for VF VSI node on this
+ * Traffic Class
+ */
+ n_prt = ice_lag_get_sched_parent(new_hw, tc);
+ if (!n_prt)
+ goto resume_traffic;
+
+ /* Move the VF's VSI node for this TC to the new port's scheduler tree */
+ buf_size = struct_size(buf, teid, 1);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ dev_warn(dev, "Failure to alloc memory for VF node failover\n");
+ goto resume_traffic;
+ }
+
+ buf->hdr.src_parent_teid = parent_teid;
+ buf->hdr.dest_parent_teid = n_prt->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
+ buf->teid[0] = teid;
+
+ if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
+ NULL))
+ dev_warn(dev, "Failure to move VF nodes for failover\n");
+ else
+ ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
+
+ kfree(buf);
+ goto resume_traffic;
+
+qbuf_err:
+ kfree(qbuf);
+
+resume_traffic:
+ /* restart traffic for VSI node */
+ if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, false))
+ dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
+}
+
+/**
+ * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
+ * @lag: primary interface LAG struct
+ * @oldport: lport of previous interface
+ * @newport: lport of destination interface
+ * @vsi_num: SW index of VF's VSI
+ */
+static void
+ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
+ u16 vsi_num)
+{
+ u8 tc;
+
+ ice_for_each_traffic_class(tc)
+ ice_lag_move_vf_node_tc(lag, oldport, newport, vsi_num, tc);
+}
+
+/**
+ * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
+ * @vf: the VF to move Tx nodes for
+ *
+ * Called just after configuring new VF queues. Check whether the VF Tx
+ * scheduling nodes need to be updated to fail over to the active port. If so,
+ * move them now.
+ */
+void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
+{
+ struct ice_lag_netdev_list ndlist;
+ struct list_head *tmp, *n;
+ u8 pri_port, act_port;
+ struct ice_lag *lag;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ if (WARN_ON(vsi->type != ICE_VSI_VF))
+ return;
+
+ pf = vf->pf;
+ lag = pf->lag;
+
+ mutex_lock(&pf->lag_mutex);
+ if (!lag->bonded)
+ goto new_vf_unlock;
+
+ pri_port = pf->hw.port_info->lport;
+ act_port = lag->active_port;
+
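+ /* Build a temporary list of the bond's member netdevs; it is used to
+ * locate the destination HW for the node move.
+ */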
+ if (lag->upper_netdev) {
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist.node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist.node);
+ }
+ rcu_read_unlock();
+ }
+
+ lag->netdev_head = &ndlist.node;
+
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
+ lag->bonded && lag->primary && pri_port != act_port &&
+ !list_empty(lag->netdev_head))
+ ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
+
+ list_for_each_safe(tmp, n, &ndlist.node) {
+ struct ice_lag_netdev_list *entry;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ lag->netdev_head = NULL;
+
+new_vf_unlock:
+ mutex_unlock(&pf->lag_mutex);
+}
+
+/**
+ * ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
+ * @lag: lag info struct
+ * @oldport: lport of previous interface
+ * @newport: lport of destination interface
+ */
+static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
+{
+ struct ice_pf *pf;
+ int i;
+
+ if (!lag->primary)
+ return;
+
+ pf = lag->pf;
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
+ pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
+}
+
+#define ICE_LAG_SRIOV_CP_RECIPE 10
+#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
+
+/**
+ * ice_lag_cfg_cp_fltr - configure filter for control packets
+ * @lag: local interface's lag struct
+ * @add: add or remove rule
+ */
+static void
+ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
+ struct ice_vsi *vsi;
+ u16 buf_len, opc;
+
+ vsi = lag->pf->vsi[0];
+
+ buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
+ ICE_LAG_SRIOV_TRAIN_PKT_LEN);
+ s_rule = kzalloc(buf_len, GFP_KERNEL);
+ if (!s_rule) {
+ netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
+ return;
+ }
+
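+ /* Rule steers LACP (ethertype 0x8809) control packets to the PF's main VSI */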
+ if (add) {
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
+ s_rule->src = cpu_to_le16(vsi->port_info->lport);
+ s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
+ ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_VALID_BIT |
+ ((vsi->vsi_num <<
+ ICE_SINGLE_ACT_VSI_ID_S) &
+ ICE_SINGLE_ACT_VSI_ID_M));
+ s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
+ memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ opc = ice_aqc_opc_remove_sw_rules;
+ s_rule->index = cpu_to_le16(lag->cp_rule_idx);
+ }
+ if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
+ netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
+ add ? "ADDING" : "REMOVING");
+ goto cp_free;
+ }
+
+ if (add)
+ lag->cp_rule_idx = le16_to_cpu(s_rule->index);
+ else
+ lag->cp_rule_idx = 0;
+
+cp_free:
+ kfree(s_rule);
}
/**
@@ -124,117 +745,430 @@ lag_out:
}
/**
+ * ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
+ * @lag: primary interface lag struct
+ * @src_hw: HW struct current node location
+ * @vsi_num: VSI index in PF space
+ * @tc: traffic class to move
+ */
+static void
+ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
+ u8 tc)
+{
+ u16 numq, valq, buf_size, num_moved, qbuf_size;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_aqc_move_elem *buf;
+ struct ice_sched_node *n_prt;
+ __le32 teid, parent_teid;
+ struct ice_vsi_ctx *ctx;
+ struct ice_hw *hw;
+ u32 tmp_teid;
+
+ hw = &lag->pf->hw;
+ ctx = ice_get_vsi_ctx(hw, vsi_num);
+ if (!ctx) {
+ dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
+ return;
+ }
+
+ /* check to see if this VF is enabled on this TC */
+ if (!ctx->sched.vsi_node[tc])
+ return;
+
+ numq = ctx->num_lan_q_entries[tc];
+ teid = ctx->sched.vsi_node[tc]->info.node_teid;
+ tmp_teid = le32_to_cpu(teid);
+ parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
+
+ /* if !teid or !numq, then this TC is not active */
+ if (!tmp_teid || !numq)
+ return;
+
+ /* suspend traffic */
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
+ dev_dbg(dev, "Problem suspending traffic for LAG node move\n");
+
+ /* reconfig queues for new port */
+ qbuf_size = struct_size(qbuf, queue_info, numq);
+ qbuf = kzalloc(qbuf_size, GFP_KERNEL);
+ if (!qbuf) {
+ dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
+ goto resume_reclaim;
+ }
+
+ /* add the per queue info for the reconfigure command buffer */
+ valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
+ if (!valq) {
+ dev_dbg(dev, "No valid queues found for LAG reclaim\n");
+ goto reclaim_none;
+ }
+
+ if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
+ src_hw->port_info->lport, hw->port_info->lport,
+ NULL)) {
+ dev_warn(dev, "Failure to configure queues for LAG failover\n");
+ goto reclaim_qerr;
+ }
+
+reclaim_none:
+ kfree(qbuf);
+
+ /* find parent in primary tree */
+ n_prt = ice_lag_get_sched_parent(hw, tc);
+ if (!n_prt)
+ goto resume_reclaim;
+
+ /* Move node to new parent */
+ buf_size = struct_size(buf, teid, 1);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ dev_warn(dev, "Failure to alloc memory for VF node failover\n");
+ goto resume_reclaim;
+ }
+
+ buf->hdr.src_parent_teid = parent_teid;
+ buf->hdr.dest_parent_teid = n_prt->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
+ buf->teid[0] = teid;
+
+ if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
+ NULL))
+ dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
+ else
+ ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
+
+ kfree(buf);
+ goto resume_reclaim;
+
+reclaim_qerr:
+ kfree(qbuf);
+
+resume_reclaim:
+ /* restart traffic */
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
+ dev_warn(dev, "Problem restarting traffic for LAG node reclaim\n");
+}
+
+/**
+ * ice_lag_reclaim_vf_nodes - reclaim VF nodes when an interface leaves the bond
+ * @lag: primary interface lag struct
+ * @src_hw: HW struct for current node location
+ */
+static void
+ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
+{
+ struct ice_pf *pf;
+ int i, tc;
+
+ if (!lag->primary || !src_hw)
+ return;
+
+ pf = lag->pf;
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
+ pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ ice_for_each_traffic_class(tc)
+ ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
+}
+
+/**
* ice_lag_link - handle LAG link event
* @lag: LAG info struct
- * @info: info from the netdev notifier
*/
-static void
-ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
+static void ice_lag_link(struct ice_lag *lag)
{
- struct net_device *netdev_tmp, *upper = info->upper_dev;
struct ice_pf *pf = lag->pf;
- int peers = 0;
if (lag->bonded)
dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
netdev_name(lag->netdev));
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(upper, netdev_tmp)
- peers++;
- rcu_read_unlock();
-
- if (lag->upper_netdev != upper) {
- dev_hold(upper);
- lag->upper_netdev = upper;
- }
-
- ice_clear_rdma_cap(pf);
-
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
-
- /* if this is the first element in an LAG mark as primary */
- lag->primary = !!(peers == 1);
+ netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}
/**
* ice_lag_unlink - handle unlink event
* @lag: LAG info struct
- * @info: info from netdev notification
*/
-static void
-ice_lag_unlink(struct ice_lag *lag,
- struct netdev_notifier_changeupper_info *info)
+static void ice_lag_unlink(struct ice_lag *lag)
{
- struct net_device *netdev_tmp, *upper = info->upper_dev;
+ u8 pri_port, act_port, loc_port;
struct ice_pf *pf = lag->pf;
- bool found = false;
if (!lag->bonded) {
netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
return;
}
- /* determine if we are in the new LAG config or not */
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
- if (netdev_tmp == lag->netdev) {
- found = true;
- break;
+ if (lag->primary) {
+ act_port = lag->active_port;
+ pri_port = lag->pf->hw.port_info->lport;
+ if (act_port != pri_port && act_port != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes(lag, act_port, pri_port);
+ lag->primary = false;
+ lag->active_port = ICE_LAG_INVALID_PORT;
+ } else {
+ struct ice_lag *primary_lag;
+
+ primary_lag = ice_lag_find_primary(lag);
+ if (primary_lag) {
+ act_port = primary_lag->active_port;
+ pri_port = primary_lag->pf->hw.port_info->lport;
+ loc_port = pf->hw.port_info->lport;
+ if (act_port == loc_port &&
+ act_port != ICE_LAG_INVALID_PORT) {
+ ice_lag_reclaim_vf_nodes(primary_lag,
+ &lag->pf->hw);
+ primary_lag->active_port = ICE_LAG_INVALID_PORT;
+ }
}
}
- rcu_read_unlock();
- if (found)
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+ lag->upper_netdev = NULL;
+}
+
+/**
+ * ice_lag_link_unlink - helper function to call lag_link/unlink
+ * @lag: lag info struct
+ * @ptr: opaque pointer data
+ */
+static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (netdev != lag->netdev)
return;
- if (lag->upper_netdev) {
- dev_put(lag->upper_netdev);
- lag->upper_netdev = NULL;
+ if (info->linking)
+ ice_lag_link(lag);
+ else
+ ice_lag_unlink(lag);
+}
+
+/**
+ * ice_lag_set_swid - set the SWID on secondary interface
+ * @primary_swid: primary interface's SWID
+ * @local_lag: local interface's LAG struct
+ * @link: Is this a linking activity
+ *
+ * If link is false, primary_swid is not expected to be valid.
+ * This function should never be called in interrupt context.
+ */
+static void
+ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
+ bool link)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_aq_desc desc;
+ u16 buf_len, swid;
+ int status, i;
+
+ buf_len = struct_size(buf, elem, 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ dev_err(ice_pf_to_dev(local_lag->pf), "-ENOMEM error setting SWID\n");
+ return;
}
- lag->peer_netdev = NULL;
- ice_set_rdma_cap(pf);
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
+ buf->num_elems = cpu_to_le16(1);
+ buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_SWID);
+ /* if unlinking, we need to free the shared resource */
+ if (!link && local_lag->bond_swid) {
+ buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
+ status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf,
+ buf_len, ice_aqc_opc_free_res);
+ if (status)
+ dev_err(ice_pf_to_dev(local_lag->pf), "Error freeing SWID during LAG unlink\n");
+ local_lag->bond_swid = 0;
+ }
+
+ if (link) {
+ buf->res_type |= cpu_to_le16(ICE_LAG_RES_SHARED |
+ ICE_LAG_RES_VALID);
+ /* store the primary's SWID in case it leaves bond first */
+ local_lag->bond_swid = primary_swid;
+ buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
+ } else {
+ buf->elem[0].e.sw_resp =
+ cpu_to_le16(local_lag->pf->hw.port_info->sw_id);
+ }
+
+ status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, buf_len,
+ ice_aqc_opc_alloc_res);
+ if (status)
+ dev_err(ice_pf_to_dev(local_lag->pf), "Error subscribing to SWID 0x%04X\n",
+ local_lag->bond_swid);
+
+ kfree(buf);
+
+ /* Configure port param SWID to correct value */
+ if (link)
+ swid = primary_swid;
+ else
+ swid = local_lag->pf->hw.port_info->sw_id;
+
+ cmd = &desc.params.set_port_params;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+
+ cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
+ /* If this is happening in reset context, it is possible that the
+ * primary interface has not finished setting its SWID to SHARED
+ * yet. Allow retries to account for this timing issue between
+ * interfaces.
+ */
+ for (i = 0; i < ICE_LAG_RESET_RETRIES; i++) {
+ status = ice_aq_send_cmd(&local_lag->pf->hw, &desc, NULL, 0,
+ NULL);
+ if (!status)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (status)
+ dev_err(ice_pf_to_dev(local_lag->pf), "Error setting SWID in port params %d\n",
+ status);
}
/**
- * ice_lag_unregister - handle netdev unregister events
- * @lag: LAG info struct
- * @netdev: netdev reporting the event
+ * ice_lag_primary_swid - set/clear the SHARED attrib of primary's SWID
+ * @lag: primary interface's lag struct
+ * @link: is this a linking activity
+ *
+ * Implement setting primary SWID as shared using 0x020B
*/
-static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
- struct ice_pf *pf = lag->pf;
+ struct ice_hw *hw;
+ u16 swid;
- /* check to see if this event is for this netdev
- * check that we are in an aggregate
- */
- if (netdev != lag->netdev || !lag->bonded)
+ hw = &lag->pf->hw;
+ swid = hw->port_info->sw_id;
+
+ if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
+ dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
+}
+
+/**
+ * ice_lag_add_prune_list - Adds event_pf's VSI to primary's prune list
+ * @lag: lag info struct
+ * @event_pf: PF struct for VSI we are adding to primary's prune list
+ */
+static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
+{
+ u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
+ struct ice_sw_rule_vsi_list *s_rule = NULL;
+ struct device *dev;
+
+ num_vsi = 1;
+
+ dev = ice_pf_to_dev(lag->pf);
+ event_vsi_num = event_pf->vsi[0]->vsi_num;
+ prim_vsi_idx = lag->pf->vsi[0]->idx;
+
+ if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
+ prim_vsi_idx, &vsi_list_id)) {
+ dev_warn(dev, "Could not locate prune list when setting up SRIOV LAG\n");
return;
+ }
- if (lag->upper_netdev) {
- dev_put(lag->upper_netdev);
- lag->upper_netdev = NULL;
- ice_set_rdma_cap(pf);
+ rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule) {
+ dev_warn(dev, "Error allocating space for prune list when configuring SRIOV LAG\n");
+ return;
}
- /* perform some cleanup in case we come back */
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
+
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_SET);
+ s_rule->index = cpu_to_le16(vsi_list_id);
+ s_rule->number_vsi = cpu_to_le16(num_vsi);
+ s_rule->vsi[0] = cpu_to_le16(event_vsi_num);
+
+ if (ice_aq_sw_rules(&event_pf->hw, s_rule, rule_buf_sz, 1,
+ ice_aqc_opc_update_sw_rules, NULL))
+ dev_warn(dev, "Error adding VSI prune list\n");
+ kfree(s_rule);
+}
+
+/**
+ * ice_lag_del_prune_list - Remove secondary's vsi from primary's prune list
+ * @lag: primary interface's ice_lag struct
+ * @event_pf: PF struct for unlinking interface
+ */
+static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
+{
+ u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
+ struct ice_sw_rule_vsi_list *s_rule = NULL;
+ struct device *dev;
+
+ num_vsi = 1;
+
+ dev = ice_pf_to_dev(lag->pf);
+ vsi_num = event_pf->vsi[0]->vsi_num;
+ vsi_idx = lag->pf->vsi[0]->idx;
+
+ if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
+ vsi_idx, &vsi_list_id)) {
+ dev_warn(dev, "Could not locate prune list when unwinding SRIOV LAG\n");
+ return;
+ }
+
+ rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule) {
+ dev_warn(dev, "Error allocating prune list when unwinding SRIOV LAG\n");
+ return;
+ }
+
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR);
+ s_rule->index = cpu_to_le16(vsi_list_id);
+ s_rule->number_vsi = cpu_to_le16(num_vsi);
+ s_rule->vsi[0] = cpu_to_le16(vsi_num);
+
+ if (ice_aq_sw_rules(&event_pf->hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1, ice_aqc_opc_update_sw_rules, NULL))
+ dev_warn(dev, "Error clearing VSI prune list\n");
+
+ kfree(s_rule);
+}
+
+/**
+ * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * @pf: PF struct
+ */
+static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
+{
+ struct ice_hw_common_caps *caps;
+
+ caps = &pf->hw.dev_caps.common_cap;
+ if (caps->roce_lag)
+ ice_set_feature_support(pf, ICE_F_ROCE_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
+
+ if (caps->sriov_lag)
+ ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
}
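A short sketch of how the feature flags set here gate the SRIOV LAG paths elsewhere in this patch; do_sriov_lag_work() is a hypothetical placeholder:

	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
		do_sriov_lag_work(pf);	/* e.g. SWID sharing, prune lists */
	else
		dev_dbg(ice_pf_to_dev(pf), "SRIOV LAG not supported by NVM\n");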
/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
* @ptr: opaque pointer data
- *
- * ptr is to be cast into netdev_notifier_changeupper_info
*/
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
+ struct ice_lag *primary_lag;
struct net_device *netdev;
info = ptr;
@@ -244,44 +1178,442 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
if (netdev != lag->netdev)
return;
- if (!info->upper_dev) {
- netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
+ primary_lag = ice_lag_find_primary(lag);
+ if (info->linking) {
+ lag->upper_netdev = info->upper_dev;
+ /* If there is not already a primary interface in the LAG,
+ * then mark this one as primary.
+ */
+ if (!primary_lag) {
+ lag->primary = true;
+ /* Configure primary's SWID to be shared */
+ ice_lag_primary_swid(lag, true);
+ primary_lag = lag;
+ } else {
+ u16 swid;
+
+ swid = primary_lag->pf->hw.port_info->sw_id;
+ ice_lag_set_swid(swid, lag, true);
+ ice_lag_add_prune_list(primary_lag, lag->pf);
+ }
+ /* add filter for primary control packets */
+ ice_lag_cfg_cp_fltr(lag, true);
+ } else {
+ if (!primary_lag && lag->primary)
+ primary_lag = lag;
+
+ if (!lag->primary) {
+ ice_lag_set_swid(0, lag, false);
+ } else {
+ if (primary_lag && lag->primary) {
+ ice_lag_primary_swid(lag, false);
+ ice_lag_del_prune_list(primary_lag, lag->pf);
+ }
+ }
+ /* remove filter for control packets */
+ ice_lag_cfg_cp_fltr(lag, false);
+ }
+}
+
+/**
+ * ice_lag_monitor_link - monitor interfaces entering/leaving the aggregate
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function only operates after a primary has been set.
+ */
+static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct ice_hw *prim_hw, *active_hw;
+ struct net_device *event_netdev;
+ struct ice_pf *pf;
+ u8 prim_port;
+
+ if (!lag->primary)
+ return;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ if (!netif_is_same_ice(lag->pf, event_netdev))
+ return;
+
+ pf = lag->pf;
+ prim_hw = &pf->hw;
+ prim_port = prim_hw->port_info->lport;
+
+ info = (struct netdev_notifier_changeupper_info *)ptr;
+ if (info->upper_dev != lag->upper_netdev)
return;
+
+ if (!info->linking) {
+ /* Since there are only two interfaces allowed in SRIOV+LAG, if
+ * one port is leaving, then the nodes need to be on the primary
+ * interface.
+ */
+ if (prim_port != lag->active_port &&
+ lag->active_port != ICE_LAG_INVALID_PORT) {
+ active_hw = ice_lag_find_hw_by_lport(lag,
+ lag->active_port);
+ ice_lag_reclaim_vf_nodes(lag, active_hw);
+ lag->active_port = ICE_LAG_INVALID_PORT;
+ }
}
+}
+
+/**
+ * ice_lag_monitor_active - main PF keep track of which port is active
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function is for the primary PF to monitor changes in which port is
+ * active and handle changes for SRIOV VF functionality
+ */
+static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ struct ice_netdev_priv *event_np;
+ struct ice_pf *pf, *event_pf;
+ u8 prim_port, event_port;
+
+ if (!lag->primary)
+ return;
- netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
+ pf = lag->pf;
+ if (!pf)
+ return;
- if (!netif_is_lag_master(info->upper_dev)) {
- netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
return;
+
+ event_np = netdev_priv(event_netdev);
+ event_pf = event_np->vsi->back;
+ event_port = event_pf->hw.port_info->lport;
+ prim_port = pf->hw.port_info->lport;
+
+ info = (struct netdev_notifier_bonding_info *)ptr;
+ bonding_info = &info->bonding_info;
+
+ if (!bonding_info->slave.state) {
+ /* if no port is currently active, then nodes and filters exist
+ * on the primary port - check if we need to move them
+ */
+ if (lag->active_port == ICE_LAG_INVALID_PORT) {
+ if (event_port != prim_port)
+ ice_lag_move_vf_nodes(lag, prim_port,
+ event_port);
+ lag->active_port = event_port;
+ return;
+ }
+
+ /* active port is already set and is current event port */
+ if (lag->active_port == event_port)
+ return;
+ /* new active port */
+ ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
+ lag->active_port = event_port;
+ } else {
+ /* port not set as currently active (e.g. new active port
+ * has already claimed the nodes and filters)
+ */
+ if (lag->active_port != event_port)
+ return;
+ /* This is the case when neither port is active (both links down).
+ * Set the active port to invalid and move the nodes and filters
+ * back to the primary if they are not already there.
+ */
+ if (event_port != prim_port)
+ ice_lag_move_vf_nodes(lag, event_port, prim_port);
+ lag->active_port = ICE_LAG_INVALID_PORT;
}
+}
- if (info->linking)
- ice_lag_link(lag, info);
- else
- ice_lag_unlink(lag, info);
+/**
+ * ice_lag_chk_comp - evaluate bonded interface for feature support
+ * @lag: lag info struct
+ * @ptr: opaque data for netdev event info
+ */
+static bool
+ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ struct list_head *tmp;
+ struct device *dev;
+ int count = 0;
- ice_display_lag_info(lag);
+ if (!lag->primary)
+ return true;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (event_upper != lag->upper_netdev)
+ return true;
+
+ dev = ice_pf_to_dev(lag->pf);
+
+ /* Only switchdev mode is supported for SRIOV VF LAG.
+ * The primary interface has to be in switchdev mode.
+ */
+ if (!ice_is_switchdev_running(lag->pf)) {
+ dev_info(dev, "Primary interface not in switchdev mode - VF LAG disabled\n");
+ return false;
+ }
+
+ info = (struct netdev_notifier_bonding_info *)ptr;
+ bonding_info = &info->bonding_info;
+ lag->bond_mode = bonding_info->master.bond_mode;
+ if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
+ dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
+ return false;
+ }
+
+ list_for_each(tmp, lag->netdev_head) {
+ struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
+ struct ice_lag_netdev_list *entry;
+ struct ice_netdev_priv *peer_np;
+ struct net_device *peer_netdev;
+ struct ice_vsi *vsi, *peer_vsi;
+ struct ice_pf *peer_pf;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ peer_netdev = entry->netdev;
+ if (!netif_is_ice(peer_netdev)) {
+ dev_info(dev, "Found %s non-ice netdev in LAG - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+
+ count++;
+ if (count > 2) {
+ dev_info(dev, "Found more than two netdevs in LAG - VF LAG disabled\n");
+ return false;
+ }
+
+ peer_np = netdev_priv(peer_netdev);
+ vsi = ice_get_main_vsi(lag->pf);
+ peer_vsi = peer_np->vsi;
+ if (lag->pf->pdev->bus != peer_vsi->back->pdev->bus ||
+ lag->pf->pdev->slot != peer_vsi->back->pdev->slot) {
+ dev_info(dev, "Found %s on different device in LAG - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+
+ dcb_cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ peer_dcb_cfg = &peer_vsi->port_info->qos_cfg.local_dcbx_cfg;
+ if (memcmp(dcb_cfg, peer_dcb_cfg,
+ sizeof(struct ice_dcbx_cfg))) {
+ dev_info(dev, "Found %s with different DCB in LAG - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+
+ peer_pf = peer_vsi->back;
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, peer_pf->flags)) {
+ dev_warn(dev, "Found %s with FW LLDP agent active - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+ }
+
+ return true;
}
/**
- * ice_lag_changelower_event - handle LAG changelower event
+ * ice_lag_unregister - handle netdev unregister events
* @lag: LAG info struct
- * @ptr: opaque data pointer
+ * @event_netdev: netdev struct for target of notifier event
+ */
+static void
+ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_pf *event_pf;
+ struct ice_lag *p_lag;
+
+ p_lag = ice_lag_find_primary(lag);
+ np = netdev_priv(event_netdev);
+ event_pf = np->vsi->back;
+
+ if (p_lag) {
+ if (p_lag->active_port != p_lag->pf->hw.port_info->lport &&
+ p_lag->active_port != ICE_LAG_INVALID_PORT) {
+ struct ice_hw *active_hw;
+
+ active_hw = ice_lag_find_hw_by_lport(lag,
+ p_lag->active_port);
+ if (active_hw)
+ ice_lag_reclaim_vf_nodes(p_lag, active_hw);
+ lag->active_port = ICE_LAG_INVALID_PORT;
+ }
+ }
+
+ /* primary processing for primary */
+ if (lag->primary && lag->netdev == event_netdev)
+ ice_lag_primary_swid(lag, false);
+
+ /* primary processing for secondary */
+ if (lag->primary && lag->netdev != event_netdev)
+ ice_lag_del_prune_list(lag, event_pf);
+
+ /* secondary processing for secondary */
+ if (!lag->primary && lag->netdev == event_netdev)
+ ice_lag_set_swid(0, lag, false);
+}
+
+/**
+ * ice_lag_monitor_rdma - set and clear rdma functionality
+ * @lag: pointer to lag struct
+ * @ptr: opaque data for netdev event info
+ */
+static void
+ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *netdev;
+
+ info = ptr;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev)
+ return;
+
+ if (info->linking)
+ ice_clear_rdma_cap(lag->pf);
+ else
+ ice_set_rdma_cap(lag->pf);
+}
+
+/**
+ * ice_lag_chk_disabled_bond - monitor interfaces entering/leaving disabled bond
+ * @lag: lag info struct
+ * @ptr: opaque data containing event
*
- * ptr to be cast to netdev_notifier_changelowerstate_info
+ * As interfaces enter a bond, determine if the bond is currently
+ * SRIOV LAG compliant and flag it if not. As interfaces leave the
+ * bond, reset their compliant status.
*/
-static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
+static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct ice_lag *prim_lag;
if (netdev != lag->netdev)
return;
- netdev_dbg(netdev, "bonding info\n");
+ if (info->linking) {
+ prim_lag = ice_lag_find_primary(lag);
+ if (prim_lag &&
+ !ice_is_feature_supported(prim_lag->pf, ICE_F_SRIOV_LAG)) {
+ ice_clear_feature_support(lag->pf, ICE_F_SRIOV_LAG);
+ netdev_info(netdev, "Interface added to non-compliant SRIOV LAG aggregate\n");
+ }
+ } else {
+ ice_lag_init_feature_support_flag(lag->pf);
+ }
+}
+
+/**
+ * ice_lag_disable_sriov_bond - set members of bond as not supporting SRIOV LAG
+ * @lag: primary interface's lag struct
+ */
+static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
+{
+ struct ice_lag_netdev_list *entry;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ struct ice_pf *pf;
+
+ list_for_each_entry(entry, lag->netdev_head, node) {
+ netdev = entry->netdev;
+ np = netdev_priv(netdev);
+ pf = np->vsi->back;
+
+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ }
+}
+
+/**
+ * ice_lag_process_event - process a task assigned to the lag_wq
+ * @work: pointer to work_struct
+ */
+static void ice_lag_process_event(struct work_struct *work)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct ice_lag_work *lag_work;
+ struct net_device *netdev;
+ struct list_head *tmp, *n;
+ struct ice_pf *pf;
+
+ lag_work = container_of(work, struct ice_lag_work, lag_task);
+ pf = lag_work->lag->pf;
+
+ mutex_lock(&pf->lag_mutex);
+ lag_work->lag->netdev_head = &lag_work->netdev_list.node;
+
+ switch (lag_work->event) {
+ case NETDEV_CHANGEUPPER:
+ info = &lag_work->info.changeupper_info;
+ ice_lag_chk_disabled_bond(lag_work->lag, info);
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
+ ice_lag_monitor_link(lag_work->lag, info);
+ ice_lag_changeupper_event(lag_work->lag, info);
+ ice_lag_link_unlink(lag_work->lag, info);
+ }
+ ice_lag_monitor_rdma(lag_work->lag, info);
+ break;
+ case NETDEV_BONDING_INFO:
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
+ if (!ice_lag_chk_comp(lag_work->lag,
+ &lag_work->info.bonding_info)) {
+ netdev = lag_work->info.bonding_info.info.dev;
+ ice_lag_disable_sriov_bond(lag_work->lag);
+ ice_lag_unregister(lag_work->lag, netdev);
+ goto lag_cleanup;
+ }
+ ice_lag_monitor_active(lag_work->lag,
+ &lag_work->info.bonding_info);
+ ice_lag_cfg_pf_fltrs(lag_work->lag,
+ &lag_work->info.bonding_info);
+ }
+ ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
+ break;
+ case NETDEV_UNREGISTER:
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
+ netdev = lag_work->info.bonding_info.info.dev;
+ if ((netdev == lag_work->lag->netdev ||
+ lag_work->lag->primary) && lag_work->lag->bonded)
+ ice_lag_unregister(lag_work->lag, netdev);
+ }
+ break;
+ default:
+ break;
+ }
+
+lag_cleanup:
+ /* cleanup resources allocated for this work item */
+ list_for_each_safe(tmp, n, &lag_work->netdev_list.node) {
+ struct ice_lag_netdev_list *entry;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ lag_work->lag->netdev_head = NULL;
- if (!netif_is_lag_port(netdev))
- netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
+ mutex_unlock(&pf->lag_mutex);
+
+ kfree(lag_work);
}
/**
@@ -295,34 +1627,79 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *upper_netdev;
+ struct ice_lag_work *lag_work;
struct ice_lag *lag;
- lag = container_of(notif_blk, struct ice_lag, notif_block);
+ if (!netif_is_ice(netdev))
+ return NOTIFY_DONE;
+
+ if (event != NETDEV_CHANGEUPPER && event != NETDEV_BONDING_INFO &&
+ event != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+
+ if (!(netdev->priv_flags & IFF_BONDING))
+ return NOTIFY_DONE;
+ lag = container_of(notif_blk, struct ice_lag, notif_block);
if (!lag->netdev)
return NOTIFY_DONE;
- /* Check that the netdev is in the working namespace */
if (!net_eq(dev_net(netdev), &init_net))
return NOTIFY_DONE;
+ /* This memory will be freed at the end of ice_lag_process_event */
+ lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL);
+ if (!lag_work)
+ return -ENOMEM;
+
+ lag_work->event_netdev = netdev;
+ lag_work->lag = lag;
+ lag_work->event = event;
+ if (event == NETDEV_CHANGEUPPER) {
+ struct netdev_notifier_changeupper_info *info;
+
+ info = ptr;
+ upper_netdev = info->upper_dev;
+ } else {
+ upper_netdev = netdev_master_upper_dev_get(netdev);
+ }
+
+ INIT_LIST_HEAD(&lag_work->netdev_list.node);
+ if (upper_netdev) {
+ struct ice_lag_netdev_list *nd_list;
+ struct net_device *tmp_nd;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
+ nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
+ if (!nd_list)
+ break;
+
+ nd_list->netdev = tmp_nd;
+ list_add(&nd_list->node, &lag_work->netdev_list.node);
+ }
+ rcu_read_unlock();
+ }
+
switch (event) {
case NETDEV_CHANGEUPPER:
- ice_lag_changeupper_event(lag, ptr);
- break;
- case NETDEV_CHANGELOWERSTATE:
- ice_lag_changelower_event(lag, ptr);
+ lag_work->info.changeupper_info =
+ *((struct netdev_notifier_changeupper_info *)ptr);
break;
case NETDEV_BONDING_INFO:
- ice_lag_info_event(lag, ptr);
- break;
- case NETDEV_UNREGISTER:
- ice_lag_unregister(lag, netdev);
+ lag_work->info.bonding_info =
+ *((struct netdev_notifier_bonding_info *)ptr);
break;
default:
+ lag_work->info.notifier_info =
+ *((struct netdev_notifier_info *)ptr);
break;
}
+ INIT_WORK(&lag_work->lag_task, ice_lag_process_event);
+ queue_work(ice_lag_wq, &lag_work->lag_task);
+
return NOTIFY_DONE;
}
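The notifier info that ptr points to lives on the caller's stack, so the handler snapshots it into the work item before queueing; a minimal sketch of that pattern, using the ice_lag_work layout added in ice_lag.h later in this patch (error handling and the other event types elided):

	struct ice_lag_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return NOTIFY_DONE;
	/* copy the stack-allocated notifier info before returning */
	w->info.changeupper_info =
		*(struct netdev_notifier_changeupper_info *)ptr;
	INIT_WORK(&w->lag_task, ice_lag_process_event);
	queue_work(ice_lag_wq, &w->lag_task);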
@@ -366,6 +1743,174 @@ static void ice_unregister_lag_handler(struct ice_lag *lag)
}
/**
+ * ice_create_lag_recipe
+ * @hw: pointer to HW struct
+ * @rid: pointer to u16 to pass back recipe index
+ * @base_recipe: recipe to base the new recipe on
+ * @prio: priority for new recipe
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int ice_create_lag_recipe(struct ice_hw *hw, u16 *rid,
+ const u8 *base_recipe, u8 prio)
+{
+ struct ice_aqc_recipe_data_elem *new_rcp;
+ int err;
+
+ err = ice_alloc_recipe(hw, rid);
+ if (err)
+ return err;
+
+ new_rcp = kzalloc(ICE_RECIPE_LEN * ICE_MAX_NUM_RECIPES, GFP_KERNEL);
+ if (!new_rcp)
+ return -ENOMEM;
+
+ memcpy(new_rcp, base_recipe, ICE_RECIPE_LEN);
+ new_rcp->content.act_ctrl_fwd_priority = prio;
+ new_rcp->content.rid = *rid | ICE_AQ_RECIPE_ID_IS_ROOT;
+ new_rcp->recipe_indx = *rid;
+ bitmap_zero((unsigned long *)new_rcp->recipe_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ set_bit(*rid, (unsigned long *)new_rcp->recipe_bitmap);
+
+ err = ice_aq_add_recipe(hw, new_rcp, 1, NULL);
+ if (err)
+ *rid = 0;
+
+ kfree(new_rcp);
+ return err;
+}
+
+/**
+ * ice_lag_move_vf_nodes_tc_sync - move a VF's nodes for a tc during reset
+ * @lag: primary interface's lag struct
+ * @dest_hw: HW struct for destination's interface
+ * @vsi_num: VSI index in PF space
+ * @tc: traffic class to move
+ */
+static void
+ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
+ u16 vsi_num, u8 tc)
+{
+ u16 numq, valq, buf_size, num_moved, qbuf_size;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_aqc_move_elem *buf;
+ struct ice_sched_node *n_prt;
+ __le32 teid, parent_teid;
+ struct ice_vsi_ctx *ctx;
+ struct ice_hw *hw;
+ u32 tmp_teid;
+
+ hw = &lag->pf->hw;
+ ctx = ice_get_vsi_ctx(hw, vsi_num);
+ if (!ctx) {
+ dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
+ return;
+ }
+
+ if (!ctx->sched.vsi_node[tc])
+ return;
+
+ numq = ctx->num_lan_q_entries[tc];
+ teid = ctx->sched.vsi_node[tc]->info.node_teid;
+ tmp_teid = le32_to_cpu(teid);
+ parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
+
+ if (!tmp_teid || !numq)
+ return;
+
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
+ dev_dbg(dev, "Problem suspending traffic during reset rebuild\n");
+
+ /* reconfig queues for new port */
+ qbuf_size = struct_size(qbuf, queue_info, numq);
+ qbuf = kzalloc(qbuf_size, GFP_KERNEL);
+ if (!qbuf) {
+ dev_warn(dev, "Failure allocating VF queue recfg buffer for reset rebuild\n");
+ goto resume_sync;
+ }
+
+ /* add the per queue info for the reconfigure command buffer */
+ valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
+ if (!valq) {
+ dev_warn(dev, "Failure to reconfig queues for LAG reset rebuild\n");
+ goto sync_none;
+ }
+
+ if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
+ dest_hw->port_info->lport, NULL)) {
+ dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
+ goto sync_qerr;
+ }
+
+sync_none:
+ kfree(qbuf);
+
+ /* find parent in destination tree */
+ n_prt = ice_lag_get_sched_parent(dest_hw, tc);
+ if (!n_prt)
+ goto resume_sync;
+
+ /* Move node to new parent */
+ buf_size = struct_size(buf, teid, 1);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
+ goto resume_sync;
+ }
+
+ buf->hdr.src_parent_teid = parent_teid;
+ buf->hdr.dest_parent_teid = n_prt->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
+ buf->teid[0] = teid;
+
+ if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
+ NULL))
+ dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
+ else
+ ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
+
+ kfree(buf);
+ goto resume_sync;
+
+sync_qerr:
+ kfree(qbuf);
+
+resume_sync:
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
+ dev_warn(dev, "Problem restarting traffic for LAG node reset rebuild\n");
+}
+
+/**
+ * ice_lag_move_vf_nodes_sync - move vf nodes to active interface
+ * @lag: primary interface's lag struct
+ * @dest_hw: HW struct for the currently active interface
+ *
+ * This function is used in a reset context, outside of event handling,
+ * to move the VF nodes to the secondary interface when that interface
+ * is the active interface during a reset rebuild.
+ */
+static void
+ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
+{
+ struct ice_pf *pf;
+ int i, tc;
+
+ if (!lag->primary || !dest_hw)
+ return;
+
+ pf = lag->pf;
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
+ pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ ice_for_each_traffic_class(tc)
+ ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
+ tc);
+}
+
+/**
* ice_init_lag - initialize support for LAG
* @pf: PF struct
*
@@ -377,7 +1922,10 @@ int ice_init_lag(struct ice_pf *pf)
struct device *dev = ice_pf_to_dev(pf);
struct ice_lag *lag;
struct ice_vsi *vsi;
- int err;
+ u64 recipe_bits = 0;
+ int n, err;
+
+ ice_lag_init_feature_support_flag(pf);
pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!pf->lag)
@@ -394,8 +1942,8 @@ int ice_init_lag(struct ice_pf *pf)
lag->pf = pf;
lag->netdev = vsi->netdev;
lag->role = ICE_LAG_NONE;
+ lag->active_port = ICE_LAG_INVALID_PORT;
lag->bonded = false;
- lag->peer_netdev = NULL;
lag->upper_netdev = NULL;
lag->notif_block.notifier_call = NULL;
@@ -405,6 +1953,25 @@ int ice_init_lag(struct ice_pf *pf)
goto lag_error;
}
+ err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe, ice_dflt_vsi_rcp,
+ 1);
+ if (err)
+ goto lag_error;
+
+ /* associate recipes to profiles */
+ for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
+ err = ice_aq_get_recipe_to_profile(&pf->hw, n,
+ (u8 *)&recipe_bits, NULL);
+ if (err)
+ continue;
+
+ if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
+ recipe_bits |= BIT(lag->pf_recipe);
+ ice_aq_map_recipe_to_profile(&pf->hw, n,
+ (u8 *)&recipe_bits, NULL);
+ }
+ }
+
ice_display_lag_info(lag);
dev_dbg(dev, "INIT LAG complete\n");
@@ -435,11 +2002,94 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- dev_put(lag->upper_netdev);
+ flush_workqueue(ice_lag_wq);
- dev_put(lag->peer_netdev);
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &pf->lag->pf_recipe);
kfree(lag);
pf->lag = NULL;
}
+
+/**
+ * ice_lag_rebuild - rebuild lag resources after reset
+ * @pf: pointer to local pf struct
+ *
+ * PF resets are promoted to CORER resets when the interface is in an
+ * aggregate. This means that we need to rebuild the PF resources for the
+ * interface. Since this happens outside the normal event processing, we
+ * need to acquire the lag mutex.
+ *
+ * This function will also evaluate the VF resources if this is the primary
+ * interface.
+ */
+void ice_lag_rebuild(struct ice_pf *pf)
+{
+ struct ice_lag_netdev_list ndlist;
+ struct ice_lag *lag, *prim_lag;
+ struct list_head *tmp, *n;
+ u8 act_port, loc_port;
+
+ if (!pf->lag || !pf->lag->bonded)
+ return;
+
+ mutex_lock(&pf->lag_mutex);
+
+ lag = pf->lag;
+ if (lag->primary) {
+ prim_lag = lag;
+ } else {
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist.node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist.node);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = &ndlist.node;
+ prim_lag = ice_lag_find_primary(lag);
+ }
+
+ if (!prim_lag) {
+ dev_dbg(ice_pf_to_dev(pf), "No primary interface in aggregate, can't rebuild\n");
+ goto lag_rebuild_out;
+ }
+
+ act_port = prim_lag->active_port;
+ loc_port = lag->pf->hw.port_info->lport;
+
+ /* configure SWID for this port */
+ if (lag->primary) {
+ ice_lag_primary_swid(lag, true);
+ } else {
+ ice_lag_set_swid(prim_lag->pf->hw.port_info->sw_id, lag, true);
+ ice_lag_add_prune_list(prim_lag, pf);
+ if (act_port == loc_port)
+ ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
+ }
+
+ ice_lag_cfg_cp_fltr(lag, true);
+
+ if (lag->pf_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
+
+ ice_clear_rdma_cap(pf);
+lag_rebuild_out:
+ list_for_each_safe(tmp, n, &ndlist.node) {
+ struct ice_lag_netdev_list *entry;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ mutex_unlock(&pf->lag_mutex);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 2c373676c42f..18075b82485a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -14,20 +14,52 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
+#define ICE_LAG_INVALID_PORT 0xFF
+
+#define ICE_LAG_RESET_RETRIES 5
+
struct ice_pf;
+struct ice_vf;
+
+struct ice_lag_netdev_list {
+ struct list_head node;
+ struct net_device *netdev;
+};
/* LAG info struct */
struct ice_lag {
struct ice_pf *pf; /* backlink to PF struct */
struct net_device *netdev; /* this PF's netdev */
- struct net_device *peer_netdev;
struct net_device *upper_netdev; /* upper bonding netdev */
+ struct list_head *netdev_head;
struct notifier_block notif_block;
+ s32 bond_mode;
+ u16 bond_swid; /* swid for primary interface */
+ u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
+ u16 pf_recipe;
+ u16 pf_rule_id;
+ u16 cp_rule_idx;
u8 role;
};
+/* LAG workqueue struct */
+struct ice_lag_work {
+ struct work_struct lag_task;
+ struct ice_lag_netdev_list netdev_list;
+ struct ice_lag *lag;
+ unsigned long event;
+ struct net_device *event_netdev;
+ union {
+ struct netdev_notifier_changeupper_info changeupper_info;
+ struct netdev_notifier_bonding_info bonding_info;
+ struct netdev_notifier_info notifier_info;
+ } info;
+};
+
+void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
+void ice_lag_rebuild(struct ice_pf *pf);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0054d7e64ec3..201570cd2e0b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -907,6 +907,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
struct ice_hw_common_caps *cap;
struct ice_pf *pf = vsi->back;
+ u16 max_rss_size;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->rss_size = 1;
@@ -914,32 +915,31 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
}
cap = &pf->hw.func_caps.common_cap;
+ max_rss_size = BIT(cap->rss_table_entry_width);
switch (vsi->type) {
case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size;
if (vsi->type == ICE_VSI_CHNL)
- vsi->rss_size = min_t(u16, vsi->num_rxq,
- BIT(cap->rss_table_entry_width));
+ vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
else
vsi->rss_size = min_t(u16, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_SWITCHDEV_CTRL:
- vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
*/
- vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_LB:
break;
@@ -1228,6 +1228,17 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
}
/**
+ * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
+ * @vsi: VSI to check whether or not VLAN pruning is enabled.
+ *
+ * returns true if Rx VLAN pruning is enabled and false otherwise.
+ */
+static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
+{
+ return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+}
+
+/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
* @vsi_flags: VSI configuration flags
@@ -1685,6 +1696,27 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
* ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to PF to check
*
@@ -1759,27 +1791,6 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
- }
-}
-
-/**
* ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
* @hw: HW pointer
* @pf_q: index of the Rx queue in the PF's queue space
@@ -2185,20 +2196,6 @@ bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
return false;
}
-/**
- * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
- * @vsi: VSI to check whether or not VLAN pruning is enabled.
- *
- * returns true if Rx VLAN pruning is enabled and false otherwise.
- */
-bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
-{
- if (!vsi)
- return false;
-
- return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
-}
-
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
@@ -2944,21 +2941,6 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
- * ice_napi_del - Remove NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be removed
- */
-void ice_napi_del(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx)
- netif_napi_del(&vsi->q_vectors[v_idx]->napi);
-}
-
-/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3970,7 +3952,7 @@ bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to set
*/
-static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
if (f < 0 || f >= ICE_F_MAX)
return;
@@ -4076,3 +4058,28 @@ void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
+
+/**
+ * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
+ * @vsi: pointer to VSI structure
+ * @set: set or unset the bit
+ */
+int
+ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
+{
+ struct ice_vsi_ctx ctx = {
+ .info = vsi->info,
+ };
+
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+ if (set)
+ ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+ else
+ ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+
+ if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+ return -ENODEV;
+
+ vsi->info = ctx.info;
+ return 0;
+}
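A hedged usage sketch for the helper above; the VSI and the error message are illustrative:

	/* enable the local loopback bit on a VSI */
	if (ice_vsi_update_local_lb(vsi, true))
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set local loopback\n");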
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index e985766e6bb5..f24f5d1e6f9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -76,8 +76,6 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -93,8 +91,6 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void ice_napi_del(struct ice_vsi *vsi);
-
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
@@ -130,7 +126,6 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
-void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
@@ -157,11 +152,13 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b40dfe6ae321..c8286adae946 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -64,6 +64,7 @@ struct device *ice_hw_to_dev(struct ice_hw *hw)
}
static struct workqueue_struct *ice_wq;
+struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -80,7 +81,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb));
-bool netif_is_ice(struct net_device *dev)
+bool netif_is_ice(const struct net_device *dev)
{
return dev && (dev->netdev_ops == &ice_netdev_ops);
}
@@ -635,6 +636,11 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
+ if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
+ dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
+ reset_type = ICE_RESET_CORER;
+ }
+
ice_prepare_for_reset(pf, reset_type);
/* trigger the reset */
@@ -718,8 +724,13 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
/* No pending resets to finish processing. Check for new resets */
- if (test_bit(ICE_PFR_REQ, pf->state))
+ if (test_bit(ICE_PFR_REQ, pf->state)) {
reset_type = ICE_RESET_PFR;
+ if (pf->lag && pf->lag->bonded) {
+ dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
+ reset_type = ICE_RESET_CORER;
+ }
+ }
if (test_bit(ICE_CORER_REQ, pf->state))
reset_type = ICE_RESET_CORER;
if (test_bit(ICE_GLOBR_REQ, pf->state))
@@ -1239,64 +1250,63 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
return status;
}
-enum ice_aq_task_state {
- ICE_AQ_TASK_WAITING = 0,
- ICE_AQ_TASK_COMPLETE,
- ICE_AQ_TASK_CANCELED,
-};
-
-struct ice_aq_task {
- struct hlist_node entry;
+/**
+ * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @task: intermediate helper storage and identifier for waiting
+ * @opcode: the opcode to wait for
+ *
+ * Prepares to wait for a specific AdminQ completion event on the ARQ for
+ * a given PF. The actual wait is done by a later call to ice_aq_wait_for_event().
+ *
+ * Calls are separated to allow the caller to register for the event before
+ * sending the command, which mitigates a race between registering and the
+ * FW responding.
+ *
+ * To obtain only the descriptor contents, pass a task->event with a null
+ * msg_buf. If the complete data buffer is desired, allocate the
+ * task->event.msg_buf with enough space ahead of time.
+ */
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ u16 opcode)
+{
+ INIT_HLIST_NODE(&task->entry);
+ task->opcode = opcode;
+ task->state = ICE_AQ_TASK_WAITING;
- u16 opcode;
- struct ice_rq_event_info *event;
- enum ice_aq_task_state state;
-};
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_add_head(&task->entry, &pf->aq_wait_list);
+ spin_unlock_bh(&pf->aq_wait_lock);
+}
/**
* ice_aq_wait_for_event - Wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
- * @opcode: the opcode to wait for
+ * @task: ptr prepared by ice_aq_prep_for_event()
* @timeout: how long to wait, in jiffies
- * @event: storage for the event info
*
* Waits for a specific AdminQ completion event on the ARQ for a given PF. The
* current thread will be put to sleep until the specified event occurs or
* until the given timeout is reached.
*
- * To obtain only the descriptor contents, pass an event without an allocated
- * msg_buf. If the complete data buffer is desired, allocate the
- * event->msg_buf with enough space ahead of time.
- *
* Returns: zero on success, or a negative error code on failure.
*/
-int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
- struct ice_rq_event_info *event)
+int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ unsigned long timeout)
{
+ enum ice_aq_task_state *state = &task->state;
struct device *dev = ice_pf_to_dev(pf);
- struct ice_aq_task *task;
- unsigned long start;
+ unsigned long start = jiffies;
long ret;
int err;
- task = kzalloc(sizeof(*task), GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- INIT_HLIST_NODE(&task->entry);
- task->opcode = opcode;
- task->event = event;
- task->state = ICE_AQ_TASK_WAITING;
-
- spin_lock_bh(&pf->aq_wait_lock);
- hlist_add_head(&task->entry, &pf->aq_wait_list);
- spin_unlock_bh(&pf->aq_wait_lock);
-
- start = jiffies;
-
- ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+ ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
+ *state != ICE_AQ_TASK_WAITING,
timeout);
- switch (task->state) {
+ switch (*state) {
+ case ICE_AQ_TASK_NOT_PREPARED:
+ WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
+ err = -EINVAL;
+ break;
case ICE_AQ_TASK_WAITING:
err = ret < 0 ? ret : -ETIMEDOUT;
break;
@@ -1307,7 +1317,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
err = ret < 0 ? ret : 0;
break;
default:
- WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+ WARN(1, "Unexpected AdminQ wait task state %u", *state);
err = -EINVAL;
break;
}
@@ -1315,12 +1325,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
jiffies_to_msecs(jiffies - start),
jiffies_to_msecs(timeout),
- opcode);
+ task->opcode);
spin_lock_bh(&pf->aq_wait_lock);
hlist_del(&task->entry);
spin_unlock_bh(&pf->aq_wait_lock);
- kfree(task);
return err;
}
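A minimal usage sketch of the prepare/send/wait sequence described above; the opcode, descriptor, and timeout values are placeholders:

	struct ice_aq_task task = {};
	int err;

	ice_aq_prep_for_event(pf, &task, opcode);	/* register before sending */
	err = ice_aq_send_cmd(&pf->hw, &desc, NULL, 0, NULL);
	if (!err)
		err = ice_aq_wait_for_event(pf, &task, msecs_to_jiffies(3000));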
@@ -1346,23 +1355,26 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
struct ice_rq_event_info *event)
{
+ struct ice_rq_event_info *task_ev;
struct ice_aq_task *task;
bool found = false;
spin_lock_bh(&pf->aq_wait_lock);
hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
- if (task->state || task->opcode != opcode)
+ if (task->state != ICE_AQ_TASK_WAITING)
+ continue;
+ if (task->opcode != opcode)
continue;
- memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
- task->event->msg_len = event->msg_len;
+ task_ev = &task->event;
+ memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
+ task_ev->msg_len = event->msg_len;
/* Only copy the data buffer if a destination was set */
- if (task->event->msg_buf &&
- task->event->buf_len > event->buf_len) {
- memcpy(task->event->msg_buf, event->msg_buf,
+ if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
+ memcpy(task_ev->msg_buf, event->msg_buf,
event->buf_len);
- task->event->buf_len = event->buf_len;
+ task_ev->buf_len = event->buf_len;
}
task->state = ICE_AQ_TASK_COMPLETE;
@@ -3392,6 +3404,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY |
NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
}
/**
@@ -3794,6 +3807,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->lag_mutex);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
@@ -3874,6 +3888,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
mutex_init(&pf->adev_mutex);
+ mutex_init(&pf->lag_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
@@ -4506,6 +4521,31 @@ static void ice_deinit_eth(struct ice_pf *pf)
ice_decfg_netdev(vsi);
}
+/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware was not yet loaded, we have to wait more */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
static int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
@@ -4518,6 +4558,18 @@ static int ice_init_dev(struct ice_pf *pf)
return err;
}
+ /* Some cards require longer initialization times
+ * due to the necessity of loading FW from an external source.
+ * This can take up to half a minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ err = ice_wait_for_fw(hw, 30000);
+ if (err) {
+ dev_err(dev, "ice_wait_for_fw timed out");
+ return err;
+ }
+ }
+
ice_init_feature_support(pf);
ice_request_fw(pf);
@@ -5570,7 +5622,7 @@ static struct pci_driver ice_driver = {
*/
static int __init ice_module_init(void)
{
- int status;
+ int status = -ENOMEM;
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
@@ -5578,15 +5630,27 @@ static int __init ice_module_init(void)
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
- return -ENOMEM;
+ return status;
+ }
+
+ ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
+ if (!ice_lag_wq) {
+ pr_err("Failed to create LAG workqueue\n");
+ goto err_dest_wq;
}
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
- destroy_workqueue(ice_wq);
+ goto err_dest_lag_wq;
}
+ return 0;
+
+err_dest_lag_wq:
+ destroy_workqueue(ice_lag_wq);
+err_dest_wq:
+ destroy_workqueue(ice_wq);
return status;
}
module_init(ice_module_init);
@@ -5601,6 +5665,7 @@ static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
destroy_workqueue(ice_wq);
+ destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
}
module_exit(ice_module_exit);
@@ -5703,7 +5768,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (!vsi)
+ if (!vsi || ice_is_switchdev_running(vsi->back))
return;
/* Set the flags to synchronize filters
@@ -6255,7 +6320,7 @@ static void ice_tx_dim_work(struct work_struct *work)
u16 itr;
dim = container_of(work, struct dim, work);
- rc = (struct ice_ring_container *)dim->priv;
+ rc = dim->priv;
WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
@@ -6275,7 +6340,7 @@ static void ice_rx_dim_work(struct work_struct *work)
u16 itr;
dim = container_of(work, struct dim, work);
- rc = (struct ice_ring_container *)dim->priv;
+ rc = dim->priv;
WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
@@ -7356,6 +7421,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_RESET_FAILED, pf->state);
ice_plug_aux_dev(pf);
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
+ ice_lag_rebuild(pf);
return;
err_vsi_rebuild:
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 6a9364761165..f6f27361c3cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -287,6 +287,7 @@ struct ice_nvgre_hdr {
* M = EVLAN (0x8100) - Outer L2 header has EVLAN (ethernet type 0x8100)
* N = EVLAN (0x9100) - Outer L2 header has EVLAN (ethernet type 0x9100)
*/
+#define ICE_PKT_FROM_NETWORK BIT(3)
#define ICE_PKT_VLAN_STAG BIT(12)
#define ICE_PKT_VLAN_ITAG BIT(13)
#define ICE_PKT_VLAN_EVLAN (BIT(14) | BIT(15))
@@ -392,10 +393,10 @@ enum ice_hw_metadata_offset {
};
enum ice_pkt_flags {
- ICE_PKT_FLAGS_VLAN = 0,
- ICE_PKT_FLAGS_TUNNEL = 1,
- ICE_PKT_FLAGS_TCP = 2,
- ICE_PKT_FLAGS_ERROR = 3,
+ ICE_PKT_FLAGS_MDID20 = 0,
+ ICE_PKT_FLAGS_MDID21 = 1,
+ ICE_PKT_FLAGS_MDID22 = 2,
+ ICE_PKT_FLAGS_MDID23 = 3,
};
struct ice_hw_metadata {
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index de1d83300481..f818dd215c05 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -295,7 +295,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
*
* Read a PHY register for the given port over the device sideband queue.
*/
-int
+static int
ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
@@ -372,7 +372,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
*
* Write a PHY register for the given port over the device sideband queue.
*/
-int
+static int
ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
@@ -1081,7 +1081,7 @@ exit_err:
*
* Negative adjustments are supported using 2s complement arithmetic.
*/
-int
+static int
ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
{
u32 l_time, u_time;
@@ -2914,6 +2914,185 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
return 0;
}
+/**
+ * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * E810 devices do not use a Tx memory status register. Instead simply
+ * indicate that all timestamps are currently ready.
+ */
+static int
+ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
+{
+ *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
+ return 0;
+}
+
+/* E810T SMA functions
+ *
+ * The following functions operate specifically on E810T hardware and are used
+ * to access the extended GPIOs available.
+ */
+
+/**
+ * ice_get_pca9575_handle
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * When found, the value will be cached in the hw structure and subsequent
+ * calls will return the cached value.
+ */
+static int
+ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+ /* If handle was not detected read it from the netlist */
+ cmd = &desc.params.get_link_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ /* Set node type to GPIO controller */
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -EOPNOTSUPP;
+
+ cmd->addr.topo_params.index = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status)
+ return -EOPNOTSUPP;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -EOPNOTSUPP;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
+ * PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ *data = 0;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ &pin, NULL);
+ if (status)
+ break;
+ *data |= (u8)(!pin) << i;
+ }
+
+ return status;
+}
+
+/**
+ * ice_write_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
+ * of the PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ pin = !(data & (1 << i));
+ status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ pin, NULL);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ice_read_pca9575_reg_e810t
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the register from the GPIO controller
+ */
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
/* Device agnostic functions
*
* The following functions implement shared behavior common to both E822 and
@@ -3175,204 +3354,6 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
}
/**
- * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
- * @hw: pointer to the HW struct
- * @port: the PHY port to read
- * @tstamp_ready: contents of the Tx memory status register
- *
- * E810 devices do not use a Tx memory status register. Instead simply
- * indicate that all timestamps are currently ready.
- */
-static int
-ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
-{
- *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
- return 0;
-}
-
-/* E810T SMA functions
- *
- * The following functions operate specifically on E810T hardware and are used
- * to access the extended GPIOs available.
- */
-
-/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
- * ice_read_sma_ctrl_e810t
- * @hw: pointer to the hw struct
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
- * PCA9575 expander, so only bits 3-7 in data are valid.
- */
-int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
-{
- int status;
- u16 handle;
- u8 i;
-
- status = ice_get_pca9575_handle(hw, &handle);
- if (status)
- return status;
-
- *data = 0;
-
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
- bool pin;
-
- status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
- &pin, NULL);
- if (status)
- break;
- *data |= (u8)(!pin) << i;
- }
-
- return status;
-}
-
-/**
- * ice_write_sma_ctrl_e810t
- * @hw: pointer to the hw struct
- * @data: data to be written to the GPIO controller
- *
- * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
- * of the PCA9575 expander, so only bits 3-7 in data are valid.
- */
-int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
-{
- int status;
- u16 handle;
- u8 i;
-
- status = ice_get_pca9575_handle(hw, &handle);
- if (status)
- return status;
-
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
- bool pin;
-
- pin = !(data & (1 << i));
- status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
- pin, NULL);
- if (status)
- break;
- }
-
- return status;
-}
-
-/**
- * ice_read_pca9575_reg_e810t
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
- * ice_is_pca9575_present
- * @hw: pointer to the hw struct
- *
- * Check if the SW IO expander is present in the netlist
- */
-bool ice_is_pca9575_present(struct ice_hw *hw)
-{
- u16 handle = 0;
- int status;
-
- if (!ice_is_e810t(hw))
- return false;
-
- status = ice_get_pca9575_handle(hw, &handle);
-
- return !status && handle;
-}
-
-/**
* ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
* @hw: pointer to the HW struct
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 096685237ca6..9aa10b0426fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -113,6 +113,9 @@ struct ice_cgu_pll_params_e822 {
extern const struct
ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+#define E810C_QSFP_C827_0_HANDLE 2
+#define E810C_QSFP_C827_1_HANDLE 3
+
/* Table of constants related to possible TIME_REF sources */
extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
@@ -139,11 +142,8 @@ int ice_ptp_init_phc(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
/* E822 family functions */
-int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
-int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
-int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
/**
@@ -197,7 +197,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
-bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index e30e12321abd..c686ac0935eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -254,7 +254,7 @@ static const struct net_device_ops ice_repr_netdev_ops = {
* ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
* @netdev: pointer to netdev
*/
-bool ice_is_port_repr_netdev(struct net_device *netdev)
+bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 9c2a6f496b3b..e1ee2d2c1d2d 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -12,6 +12,7 @@ struct ice_repr {
struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
+ struct ice_esw_br_port *br_port;
#ifdef CONFIG_ICE_SWITCHDEV
/* info about slow path rule */
struct ice_rule_query_data sp_rule;
@@ -27,5 +28,5 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr);
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
-bool ice_is_port_repr_netdev(struct net_device *netdev);
+bool ice_is_port_repr_netdev(const struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index b664d60fd037..c0533d7b66b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -447,7 +447,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-static int
+int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -526,7 +526,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static int
+int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
@@ -569,18 +569,24 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
struct ice_q_ctx *q_ctx;
+ u16 idx;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
return -EINVAL;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
- vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
- new_numqs,
- sizeof(*q_ctx),
- GFP_KERNEL);
- if (!vsi_ctx->lan_q_ctx[tc])
+ q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+ sizeof(*q_ctx), GFP_KERNEL);
+ if (!q_ctx)
return -ENOMEM;
+
+ for (idx = 0; idx < new_numqs; idx++) {
+ q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+ q_ctx[idx].q_teid = ICE_INVAL_TEID;
+ }
+
+ vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
@@ -592,9 +598,16 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
return -ENOMEM;
+
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+
+ for (idx = prev_num; idx < new_numqs; idx++) {
+ q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+ q_ctx[idx].q_teid = ICE_INVAL_TEID;
+ }
+
vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
}
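
The ice_alloc_lan_q_ctx() hunk above stops relying on zeroed memory and instead marks every newly allocated queue context as invalid (ICE_INVAL_Q_HANDLE / ICE_INVAL_TEID), both on first allocation and when the per-TC array grows. A rough user-space analogue of that grow-copy-initialize pattern (calloc/memcpy stand in for devm_kcalloc; the INVAL_* constants are placeholders, not the driver's values):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define INVAL_Q_HANDLE	0xFFFF		/* placeholder for ICE_INVAL_Q_HANDLE */
#define INVAL_TEID	0xFFFFFFFFu	/* placeholder for ICE_INVAL_TEID */

struct q_ctx {
	uint16_t q_handle;
	uint32_t q_teid;
};

/* Grow *ctxs from prev_num to new_num entries, keeping the old contents
 * and marking every newly added entry as invalid rather than all-zero. */
int grow_q_ctx(struct q_ctx **ctxs, size_t prev_num, size_t new_num)
{
	struct q_ctx *q_ctx = calloc(new_num, sizeof(*q_ctx));
	size_t i;

	if (!q_ctx)
		return -1;

	if (*ctxs) {
		memcpy(q_ctx, *ctxs, prev_num * sizeof(*q_ctx));
		free(*ctxs);
	}

	for (i = prev_num; i < new_num; i++) {
		q_ctx[i].q_handle = INVAL_Q_HANDLE;
		q_ctx[i].q_teid = INVAL_TEID;
	}

	*ctxs = q_ctx;
	return 0;
}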
@@ -1044,7 +1057,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function add nodes to a given layer.
*/
-static int
+int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1119,7 +1132,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
*
* This function returns the current VSI layer number
*/
-static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
+u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
/* Num Layers VSI layer
* 9 6
@@ -1142,7 +1155,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
*
* This function returns the current aggregator layer number
*/
-static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
+u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
/* Num Layers aggregator layer
* 9 4
@@ -1577,7 +1590,7 @@ ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* This function retrieves an aggregator node for a given aggregator ID from
* a given TC branch
*/
-static struct ice_sched_node *
+struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u32 agg_id)
{
@@ -2139,7 +2152,7 @@ ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
* This function walks through the aggregator subtree to find a free parent
* node
*/
-static struct ice_sched_node *
+struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
u16 *num_nodes)
{
@@ -3958,7 +3971,7 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from passed in argument BW.
*/
-int
+static int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 9c100747445a..0055d9330c07 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -141,13 +141,30 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-int
-ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
- enum ice_agg_type agg_type, u8 tc,
- enum ice_rl_type rl_type, u32 bw);
int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int
+ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
+ bool suspend);
+struct ice_sched_node *
+ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ u32 agg_id);
+u8 ice_sched_get_agg_layer(struct ice_hw *hw);
+u8 ice_sched_get_vsi_layer(struct ice_hw *hw);
+struct ice_sched_node *
+ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
+ u16 *num_nodes);
+int
+ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
+ struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer,
+ u16 num_nodes, u32 *first_node_teid,
+ u16 *num_nodes_added);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
+int
+ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_move_elem *buf, u16 buf_size,
+ u16 *grps_movd, struct ice_sq_cd *cd);
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 6db4ca7978cb..2f77b684ff76 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -20,12 +20,11 @@
* byte 0 = 0x2: to identify it as locally administered DA MAC
* byte 6 = 0x2: to identify it as locally administered SA MAC
* byte 12 = 0x81 & byte 13 = 0x00:
- * In case of VLAN filter first two bytes defines ether type (0x8100)
- * and remaining two bytes are placeholder for programming a given VLAN ID
- * In case of Ether type filter it is treated as header without VLAN tag
- * and byte 12 and 13 is used to program a given Ether type instead
+ * In case of a VLAN filter the first two bytes define the ether type (0x8100)
+ * and the remaining two bytes are a placeholder for programming a given VLAN ID.
+ * In case of an Ether type filter it is treated as a header without a VLAN tag
+ * and bytes 12 and 13 are used to program a given Ether type instead.
*/
-#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
@@ -1369,14 +1368,6 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(tcp, 0),
};
-#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
- ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
- ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
-#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
-#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
-
/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
ICE_MAX_NUM_PROFILES);
@@ -1841,8 +1832,13 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
- sw_buf->res_type =
- cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
+ if (opc == ice_aqc_opc_alloc_res)
+ sw_buf->res_type =
+ cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ else
+ sw_buf->res_type =
+ cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit;
@@ -1851,7 +1847,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
if (opc == ice_aqc_opc_free_res)
sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
- status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
if (status)
goto ice_aq_alloc_free_vsi_list_exit;
@@ -1910,7 +1906,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
*
* Add(0x0290)
*/
-static int
+int
ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd)
@@ -1947,7 +1943,7 @@ ice_aq_add_recipe(struct ice_hw *hw,
* The caller must supply enough space in s_recipe_list to hold all possible
* recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
*/
-static int
+int
ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
@@ -2040,7 +2036,7 @@ error_out:
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
-static int
+int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -2066,7 +2062,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
-static int
+int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -2090,7 +2086,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
-static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
u16 buf_len;
@@ -2105,8 +2101,8 @@ static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
ICE_AQC_RES_TYPE_S) |
ICE_AQC_RES_TYPE_FLAG_SHARED);
- status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res);
if (!status)
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
kfree(sw_buf);
@@ -2272,6 +2268,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Propagate some data to the recipe database */
recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+ recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
recps[idx].chain_idx = root_bufs.content.result_indx &
@@ -2460,6 +2460,15 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
}
/**
+ * ice_fill_eth_hdr - helper to copy dummy_eth_hdr into supplied buffer
+ * @eth_hdr: pointer to buffer to populate
+ */
+void ice_fill_eth_hdr(u8 *eth_hdr)
+{
+ memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN);
+}
+
+/**
* ice_fill_sw_rule - Helper function to fill switch rule structure
* @hw: pointer to the hardware structure
* @f_info: entry containing packet forwarding information
@@ -3118,7 +3127,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
* handle element. This can be extended further to search VSI list with more
* than 1 vsi_count. Returns pointer to VSI list entry if found.
*/
-static struct ice_vsi_list_map_info *
+struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
u16 *vsi_list_id)
{
@@ -3129,7 +3138,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
- if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+ if (list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id;
@@ -3400,54 +3409,6 @@ exit:
}
/**
- * ice_mac_fltr_exist - does this MAC filter exist for given VSI
- * @hw: pointer to the hardware structure
- * @mac: MAC address to be checked (for MAC filter)
- * @vsi_handle: check MAC filter for this VSI
- */
-bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
-{
- struct ice_fltr_mgmt_list_entry *entry;
- struct list_head *rule_head;
- struct ice_switch_info *sw;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
- u16 hw_vsi_id;
-
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return false;
-
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- sw = hw->switch_info;
- rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
- if (!rule_head)
- return false;
-
- rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
- mutex_lock(rule_lock);
- list_for_each_entry(entry, rule_head, list_entry) {
- struct ice_fltr_info *f_info = &entry->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
- if (is_zero_ether_addr(mac_addr))
- continue;
-
- if (f_info->flag != ICE_FLTR_TX ||
- f_info->src_id != ICE_SRC_ID_VSI ||
- f_info->lkup_type != ICE_SW_LKUP_MAC ||
- f_info->fltr_act != ICE_FWD_TO_VSI ||
- hw_vsi_id != f_info->fwd_id.hw_vsi_id)
- continue;
-
- if (ether_addr_equal(mac, mac_addr)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
- mutex_unlock(rule_lock);
- return false;
-}
-
-/**
* ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
* @hw: pointer to the hardware structure
* @vlan_id: VLAN ID
@@ -4487,8 +4448,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
- status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
goto exit;
@@ -4526,8 +4486,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
ICE_AQC_RES_TYPE_M) | alloc_shared);
buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
- status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
- ice_aqc_opc_free_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
if (status)
ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
@@ -4540,6 +4499,45 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
.offs = {__VA_ARGS__}, \
}
+/**
+ * ice_share_res - set a resource as shared or dedicated
+ * @hw: hw struct of original owner of resource
+ * @type: resource type
+ * @shared: is the resource being set to shared
+ * @res_id: resource id (descriptor)
+ */
+int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ u16 buf_len;
+ int status;
+
+ buf_len = struct_size(buf, elem, 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->num_elems = cpu_to_le16(1);
+ if (shared)
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ else
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) &
+ ~ICE_AQC_RES_TYPE_FLAG_SHARED);
+
+ buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len,
+ ice_aqc_opc_share_res);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
+ type, res_id, shared ? "SHARED" : "DEDICATED");
+
+ kfree(buf);
+ return status;
+}
+
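
ice_share_res() above builds the same res_type word as the allocation helpers and only toggles the shared flag on top of the masked type field before issuing the share opcode. A standalone sketch of that bit manipulation (the shift, mask and flag values below are illustrative stand-ins, not the driver's ICE_AQC_* definitions):

#include <stdint.h>
#include <stdio.h>

#define RES_TYPE_S		0
#define RES_TYPE_M		(0x07F << RES_TYPE_S)
#define RES_TYPE_FLAG_SHARED	(1u << 7)

static uint16_t build_res_type(uint16_t type, int shared)
{
	uint16_t word = (type << RES_TYPE_S) & RES_TYPE_M;

	return shared ? (word | RES_TYPE_FLAG_SHARED)
		      : (word & ~RES_TYPE_FLAG_SHARED);
}

int main(void)
{
	printf("shared:    0x%04x\n", build_res_type(0x05, 1));
	printf("dedicated: 0x%04x\n", build_res_type(0x05, 0));
	return 0;
}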
/* This is mapping table entry that maps every word within a given protocol
* structure to the real byte offset as per the specification of that
* protocol header.
@@ -4613,13 +4611,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* ice_find_recp - find a recipe
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
- * @tun_type: type of recipe tunnel
+ * @rinfo: information regarding the rule e.g. priority and action info
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
- enum ice_sw_tunnel_type tun_type)
+ const struct ice_adv_rule_info *rinfo)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -4680,9 +4678,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
}
/* If for "i"th recipe the found was never set to false
* then it means we found our match
- * Also tun type of recipe needs to be checked
+ * Also the tun type and *_pass_l2 flags of the recipe need
+ * to be checked
*/
- if (found && recp[i].tun_type == tun_type)
+ if (found && recp[i].tun_type == rinfo->tun_type &&
+ recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
return i; /* Return the recipe ID */
}
}
@@ -4952,6 +4953,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles)
{
DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_content *content;
struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry;
@@ -5012,6 +5014,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status)
goto err_unroll;
+ content = &buf[recps].content;
+
/* Clear the result index of the located recipe, as this will be
* updated, if needed, later in the recipe creation process.
*/
@@ -5022,26 +5026,24 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
/* if the recipe is a non-root recipe RID should be programmed
* as 0 for the rules to be applied correctly.
*/
- buf[recps].content.rid = 0;
- memset(&buf[recps].content.lkup_indx, 0,
- sizeof(buf[recps].content.lkup_indx));
+ content->rid = 0;
+ memset(&content->lkup_indx, 0,
+ sizeof(content->lkup_indx));
/* All recipes use look-up index 0 to match switch ID. */
- buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- buf[recps].content.mask[0] =
- cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
* to be 0
*/
for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- buf[recps].content.lkup_indx[i] = 0x80;
- buf[recps].content.mask[i] = 0;
+ content->lkup_indx[i] = 0x80;
+ content->mask[i] = 0;
}
for (i = 0; i < entry->r_group.n_val_pairs; i++) {
- buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
- buf[recps].content.mask[i + 1] =
- cpu_to_le16(entry->fv_mask[i]);
+ content->lkup_indx[i + 1] = entry->fv_idx[i];
+ content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
}
if (rm->n_grp_count > 1) {
@@ -5055,7 +5057,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
entry->chain_idx = chain_idx;
- buf[recps].content.result_indx =
+ content->result_indx =
ICE_AQ_RECIPE_RESULT_EN |
((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
ICE_AQ_RECIPE_RESULT_DATA_M);
@@ -5069,7 +5071,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
ICE_MAX_NUM_RECIPES);
set_bit(buf[recps].recipe_indx,
(unsigned long *)buf[recps].recipe_bitmap);
- buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ content->act_ctrl_fwd_priority = rm->priority;
+
+ if (rm->need_pass_l2)
+ content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+
+ if (rm->allow_pass_l2)
+ content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
recps++;
}
@@ -5107,9 +5115,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status)
goto err_unroll;
+ content = &buf[recps].content;
+
buf[recps].recipe_indx = (u8)rid;
- buf[recps].content.rid = (u8)rid;
- buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
+ content->rid = (u8)rid;
+ content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
/* the new entry created should also be part of rg_list to
* make sure we have complete recipe
*/
@@ -5121,16 +5131,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
goto err_unroll;
}
last_chain_entry->rid = rid;
- memset(&buf[recps].content.lkup_indx, 0,
- sizeof(buf[recps].content.lkup_indx));
+ memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
/* All recipes use look-up index 0 to match switch ID. */
- buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- buf[recps].content.mask[0] =
- cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- buf[recps].content.lkup_indx[i] =
- ICE_AQ_RECIPE_LKUP_IGNORE;
- buf[recps].content.mask[i] = 0;
+ content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+ content->mask[i] = 0;
}
i = 1;
@@ -5142,8 +5149,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
list_for_each_entry(entry, &rm->rg_list, l_entry) {
last_chain_entry->fv_idx[i] = entry->chain_idx;
- buf[recps].content.lkup_indx[i] = entry->chain_idx;
- buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+ content->lkup_indx[i] = entry->chain_idx;
+ content->mask[i++] = cpu_to_le16(0xFFFF);
set_bit(entry->rid, rm->r_bitmap);
}
list_add(&last_chain_entry->l_entry, &rm->rg_list);
@@ -5155,7 +5162,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
status = -EINVAL;
goto err_unroll;
}
- buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ content->act_ctrl_fwd_priority = rm->priority;
recps++;
rm->root_rid = (u8)rid;
@@ -5220,6 +5227,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
recp->n_grp_count = rm->n_grp_count;
recp->tun_type = rm->tun_type;
+ recp->need_pass_l2 = rm->need_pass_l2;
+ recp->allow_pass_l2 = rm->allow_pass_l2;
recp->recp_created = true;
}
rm->root_buf = buf;
@@ -5388,6 +5397,9 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* set the recipe priority if specified */
rm->priority = (u8)rinfo->priority;
+ rm->need_pass_l2 = rinfo->need_pass_l2;
+ rm->allow_pass_l2 = rinfo->allow_pass_l2;
+
/* Find offsets from the field vector. Pick the first one for all the
* recipes.
*/
@@ -5403,7 +5415,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
+ *rid = ice_find_recp(hw, lkup_exts, rinfo);
if (*rid < ICE_MAX_NUM_RECIPES)
/* Success if found a recipe that match the existing criteria */
goto err_unroll;
@@ -5839,7 +5851,9 @@ static bool ice_rules_equal(const struct ice_adv_rule_info *first,
return first->sw_act.flag == second->sw_act.flag &&
first->tun_type == second->tun_type &&
first->vlan_type == second->vlan_type &&
- first->src_vsi == second->src_vsi;
+ first->src_vsi == second->src_vsi &&
+ first->need_pass_l2 == second->need_pass_l2 &&
+ first->allow_pass_l2 == second->allow_pass_l2;
}
/**
@@ -5994,14 +6008,21 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
{
lkup->type = ICE_HW_METADATA;
- lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID21] |=
cpu_to_be16(ICE_PKT_TUNNEL_MASK);
}
+void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
+ cpu_to_be16(ICE_PKT_FROM_NETWORK);
+}
+
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
{
lkup->type = ICE_HW_METADATA;
- lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
cpu_to_be16(ICE_PKT_VLAN_MASK);
}
@@ -6078,7 +6099,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP)) {
status = -EIO;
goto free_pkt_profile;
}
@@ -6089,7 +6111,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto free_pkt_profile;
}
- if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_NOP)
rinfo->sw_act.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
@@ -6159,6 +6182,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
ICE_SINGLE_ACT_VALID_BIT;
break;
+ case ICE_NOP:
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ act &= ~ICE_SINGLE_ACT_VALID_BIT;
+ break;
default:
status = -EIO;
goto err_ice_add_adv_rule;
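
In the ICE_NOP case above the rule carries the HW VSI id in the action word but deliberately leaves ICE_SINGLE_ACT_VALID_BIT cleared, so the entry matches packets without forwarding them anywhere. FIELD_PREP(mask, value) just shifts the value up to the position of the mask's lowest set bit; a tiny standalone illustration with made-up masks (the real ICE_SINGLE_ACT_* values differ):

#include <stdint.h>
#include <stdio.h>

/* minimal stand-in for the kernel's FIELD_PREP(): shift the value to the
 * mask's lowest set bit and keep it inside the mask (uses a GCC/Clang builtin) */
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define ACT_VSI_ID_M	0x00007FF8u	/* made-up VSI id field mask */
#define ACT_VALID_BIT	0x00020000u	/* made-up "action valid" bit */

int main(void)
{
	uint32_t act = 0;

	act |= FIELD_PREP(ACT_VSI_ID_M, 0x2A);	/* place VSI id 42 */
	act &= ~ACT_VALID_BIT;			/* NOP: never marked valid */

	printf("act = 0x%08x\n", act);
	return 0;
}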
@@ -6439,7 +6467,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return -EINVAL;
@@ -6533,59 +6561,6 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
}
/**
- * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
- * given VSI handle
- * @hw: pointer to the hardware structure
- * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
- *
- * This function is used to remove all the rules for a given VSI and as soon
- * as removing a rule fails, it will return immediately with the error code,
- * else it will return success.
- */
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
-{
- struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
- struct ice_vsi_list_map_info *map_info;
- struct ice_adv_rule_info rinfo;
- struct list_head *list_head;
- struct ice_switch_info *sw;
- int status;
- u8 rid;
-
- sw = hw->switch_info;
- for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
- if (!sw->recp_list[rid].recp_created)
- continue;
- if (!sw->recp_list[rid].adv_rule)
- continue;
-
- list_head = &sw->recp_list[rid].filt_rules;
- list_for_each_entry_safe(list_itr, tmp_entry, list_head,
- list_entry) {
- rinfo = list_itr->rule_info;
-
- if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
- map_info = list_itr->vsi_list_info;
- if (!map_info)
- continue;
-
- if (!test_bit(vsi_handle, map_info->vsi_map))
- continue;
- } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
- continue;
- }
-
- rinfo.sw_act.vsi_handle = vsi_handle;
- status = ice_rem_adv_rule(hw, list_itr->lkups,
- list_itr->lkups_cnt, &rinfo);
- if (status)
- return status;
- }
- }
- return 0;
-}
-
-/**
* ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c84b56fe84a5..db7e501b7e0a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -22,6 +22,16 @@
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
+#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
+#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
+
+#define DUMMY_ETH_HDR_LEN 16
+
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
@@ -191,6 +201,8 @@ struct ice_adv_rule_info {
u16 vlan_type;
u16 fltr_rule_id;
u32 priority;
+ u16 need_pass_l2:1;
+ u16 allow_pass_l2:1;
u16 src_vsi;
struct ice_sw_act_ctrl sw_act;
struct ice_adv_rule_flags_info flags_info;
@@ -254,6 +266,9 @@ struct ice_sw_recipe {
*/
u8 priority;
+ u8 need_pass_l2:1;
+ u8 allow_pass_l2:1;
+
struct list_head rg_list;
/* AQ buffer associated with this recipe */
@@ -340,9 +355,11 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
+int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id);
/* Switch/bridge related commands */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup);
+void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup);
int
@@ -354,7 +371,6 @@ int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
@@ -379,7 +395,6 @@ int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry);
@@ -389,6 +404,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+void ice_fill_eth_hdr(u8 *eth_hdr);
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
@@ -397,4 +413,21 @@ int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
struct ice_update_recipe_lkup_idx_params *params);
void ice_change_proto_id_to_dvm(void);
+struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+ u16 *vsi_list_id);
+int ice_alloc_recipe(struct ice_hw *hw, u16 *rid);
+int ice_aq_get_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd);
+int ice_aq_add_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 num_recipes, struct ice_sq_cd *cd);
+int
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd);
+int
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd);
+
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 4a34ef5f58d3..37b54db91df2 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -7,6 +7,8 @@
#include "ice_lib.h"
#include "ice_protocol_type.h"
+#define ICE_TC_METADATA_LKUP_IDX 0
+
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
@@ -19,7 +21,13 @@ static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
struct ice_tc_flower_fltr *fltr)
{
- int lkups_cnt = 0;
+ int lkups_cnt = 1; /* 0th lookup is metadata */
+
+ /* Always add metadata as the 0th lookup. Included elements:
+ * - Direction flag (always present)
+ * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
+ * - Tunnel flag (present if tunnel)
+ */
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -54,10 +62,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
- /* is VLAN TPID specified */
- if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID)
- lkups_cnt++;
-
/* is CVLAN specified? */
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
@@ -84,10 +88,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
lkups_cnt++;
- /* matching for tunneled packets in metadata */
- if (fltr->tunnel_type != TNL_LAST)
- lkups_cnt++;
-
return lkups_cnt;
}
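
With this change the first element of the lookup array is always reserved for packet metadata, and the direction, VLAN-TPID and tunnel flags are all OR'd into that single element instead of each consuming a lookup of its own, which is why lkups_cnt now starts at 1 and the per-flag increments above are gone. A small sketch of the convention (plain C; the flag bits and field layout are illustrative, not the driver's metadata MDID words):

#include <stdint.h>

#define METADATA_LKUP_IDX	0		/* mirrors ICE_TC_METADATA_LKUP_IDX */
#define PKT_FROM_NETWORK	(1u << 4)	/* illustrative flag bits */
#define PKT_VLAN_MASK		(1u << 5)
#define PKT_TUNNEL_MASK		(1u << 6)

struct lkup_elem {
	uint16_t metadata_flags;	/* stand-in for m_u.metadata.flags[] */
};

/* list[] is assumed to be zero-initialized by the caller */
int fill_rules(struct lkup_elem *list, int want_vlan, int is_tunnel)
{
	int i = 1;	/* index 0 is the shared metadata lookup */

	/* every metadata flag is OR'd into the same reserved element */
	list[METADATA_LKUP_IDX].metadata_flags |= PKT_FROM_NETWORK;
	if (want_vlan)
		list[METADATA_LKUP_IDX].metadata_flags |= PKT_VLAN_MASK;
	if (is_tunnel)
		list[METADATA_LKUP_IDX].metadata_flags |= PKT_TUNNEL_MASK;

	/* real header lookups would be appended from index 1 onwards */
	return i;
}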
@@ -176,10 +176,9 @@ static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
- struct ice_adv_lkup_elem *list)
+ struct ice_adv_lkup_elem *list, int i)
{
struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
- int i = 0;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
u32 tenant_id;
@@ -329,8 +328,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
}
/* always fill matching on tunneled packets in metadata */
- ice_rule_add_tunnel_metadata(&list[i]);
- i++;
+ ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
return i;
}
@@ -358,13 +356,16 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
u16 vlan_tpid = 0;
- int i = 0;
+ int i = 1; /* 0th lookup is metadata */
rule_info->vlan_type = vlan_tpid;
+ /* Always add direction metadata */
+ ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
- i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
+ i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
headers = &tc_fltr->inner_headers;
inner = true;
@@ -431,8 +432,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
- ice_rule_add_vlan_metadata(&list[i]);
- i++;
+ ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
}
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
@@ -1343,24 +1343,24 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
dissector = rule->match.dissector;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_CVLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
- BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_PPPOE) |
- BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
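
The switch from BIT() to BIT_ULL() in the used_keys check matters because the dissector's used_keys field is a 64-bit mask and flow dissector key indices can exceed 31; shifting a 32-bit constant by such an index is undefined behaviour and would silently mishandle the high keys. A small standalone illustration of the difference:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int key = 33;	/* a dissector key index above bit 31 */
	uint64_t used_keys = 0;

	/* 32-bit shift: undefined behaviour once key >= 32, so the
	 * membership test would silently misfire:
	 *	used_keys |= 1U << key;
	 */

	/* 64-bit shift: well defined for keys 0..63 */
	used_keys |= 1ULL << key;

	printf("used_keys = 0x%016" PRIx64 "\n", used_keys);
	return 0;
}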
@@ -1382,10 +1382,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
*/
headers = &fltr->inner_headers;
} else if (dissector->used_keys &
- (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index ae98d5a8ff60..b2f5c9fe0149 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -21,6 +21,7 @@
#define _ICE_TRACE_H_
#include <linux/tracepoint.h>
+#include "ice_eswitch_br.h"
/* ice_trace() macro enables shared code to refer to trace points
* like:
@@ -240,6 +241,95 @@ DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+DECLARE_EVENT_CLASS(ice_esw_br_fdb_template,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb),
+ TP_STRUCT__entry(__array(char, dev_name, IFNAMSIZ)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(int, flags)),
+ TP_fast_assign(strscpy(__entry->dev_name,
+ netdev_name(fdb->dev),
+ IFNAMSIZ);
+ memcpy(__entry->addr, fdb->data.addr, ETH_ALEN);
+ __entry->vid = fdb->data.vid;
+ __entry->flags = fdb->flags;),
+ TP_printk("net_device=%s addr=%pM vid=%u flags=%x",
+ __entry->dev_name,
+ __entry->addr,
+ __entry->vid,
+ __entry->flags)
+);
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+ ice_eswitch_br_fdb_entry_create,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb)
+);
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+ ice_eswitch_br_fdb_entry_find_and_delete,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb)
+);
+
+DECLARE_EVENT_CLASS(ice_esw_br_vlan_template,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan),
+ TP_STRUCT__entry(__field(u16, vid)
+ __field(u16, flags)),
+ TP_fast_assign(__entry->vid = vlan->vid;
+ __entry->flags = vlan->flags;),
+ TP_printk("vid=%u flags=%x",
+ __entry->vid,
+ __entry->flags)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+ ice_eswitch_br_vlan_create,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+ ice_eswitch_br_vlan_cleanup,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan)
+);
+
+#define ICE_ESW_BR_PORT_NAME_L 16
+
+DECLARE_EVENT_CLASS(ice_esw_br_port_template,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port),
+ TP_STRUCT__entry(__field(u16, vport_num)
+ __array(char, port_type, ICE_ESW_BR_PORT_NAME_L)),
+ TP_fast_assign(__entry->vport_num = port->vsi_idx;
+ if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
+ strscpy(__entry->port_type,
+ "Uplink",
+ ICE_ESW_BR_PORT_NAME_L);
+ else
+ strscpy(__entry->port_type,
+ "VF Representor",
+ ICE_ESW_BR_PORT_NAME_L);),
+ TP_printk("vport_num=%u port type=%s",
+ __entry->vport_num,
+ __entry->port_type)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+ ice_eswitch_br_port_link,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+ ice_eswitch_br_port_unlink,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port)
+);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a09556e57803..5e353b0cbe6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -277,6 +277,8 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
+ u8 roce_lag;
+ u8 sriov_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -1033,14 +1035,15 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
+ ICE_NOP,
ICE_INVAL_ACT
};
struct ice_aq_get_set_rss_lut_params {
- u16 vsi_handle; /* software VSI handle */
- u16 lut_size; /* size of the LUT buffer */
- u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
u8 *lut; /* input RSS LUT for set and output RSS LUT for get */
+ enum ice_lut_size lut_size; /* size of the LUT buffer */
+ enum ice_lut_type lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
+ u16 vsi_handle; /* software VSI handle */
u8 global_lut_id; /* only valid when lut_type is global */
};
@@ -1142,9 +1145,6 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_WORDS_IN_1KB 512
-/* Hash redirection LUT for VSI - maximum array size */
-#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
-
/* AQ API version for LLDP_FILTER_CONTROL */
#define ICE_FW_API_LLDP_FLTR_MAJ 1
#define ICE_FW_API_LLDP_FLTR_MIN 7
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index ea3310be8354..24e4f4d897b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -304,6 +304,237 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
+ * @vsi: Pointer to VSI
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
+ */
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ int err;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
+ } else {
+ err = ice_vsi_add_vlan_zero(vsi);
+ }
+
+ if (err) {
+ dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
+ ice_vf_is_port_vlan_ena(vf) ?
+ ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err)
+ dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
+ vf->vf_id, vsi->idx, err);
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
+ */
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
+{
+ assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
+}
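
The rewritten ice_vf_set_host_trust_cfg() above collapses the old set_bit()/clear_bit() branch into one assign_bit() call, which sets the bit when the value is true and clears it otherwise. A minimal, non-atomic user-space sketch of that helper:

#include <stdbool.h>
#include <stdio.h>

/* simplified, non-atomic analogue of the kernel's assign_bit() */
static void assign_bit(unsigned int nr, unsigned long *addr, bool value)
{
	if (value)
		*addr |= 1UL << nr;
	else
		*addr &= ~(1UL << nr);
}

int main(void)
{
	unsigned long caps = 0;

	assign_bit(0, &caps, true);	/* trusted: capability bit set */
	assign_bit(0, &caps, false);	/* untrusted: capability bit cleared */
	printf("caps = %lu\n", caps);
	return 0;
}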
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ u8 broadcast[ETH_ALEN];
+ int status;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ vf->num_mac++;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
+ &vf->hw_lan_addr[0], vf->vf_id,
+ status);
+ return status;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves VSI into corresponding scheduler aggregator node
+ * based on cached value of "aggregator node info" per VSI
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int status;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
+ * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
+ * @vf: VF to rebuild host configuration on
+ */
+static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vf_set_host_trust_cfg(vf);
+
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
+ dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
+ vf->vf_id);
+
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
+}
+
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+static void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+}
+
+/**
* ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
* @vf: the VF being reset
*
@@ -708,18 +939,6 @@ out_unlock:
}
/**
- * ice_set_vf_state_qs_dis - Set VF queues state to disabled
- * @vf: pointer to the VF structure
- */
-static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
-{
- /* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-}
-
-/**
* ice_set_vf_state_dis - Set VF state to disabled
* @vf: pointer to the VF structure
*/
@@ -960,211 +1179,6 @@ bool ice_is_vf_link_up(struct ice_vf *vf)
}
/**
- * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
- * @vf: VF to configure trust setting for
- */
-static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
-{
- if (vf->trusted)
- set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
- else
- clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
- */
-static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u8 broadcast[ETH_ALEN];
- int status;
-
- if (WARN_ON(!vsi))
- return -EINVAL;
-
- if (ice_is_eswitch_mode_switchdev(vf->pf))
- return 0;
-
- eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
- vf->vf_id, status);
- return status;
- }
-
- vf->num_mac++;
-
- if (is_valid_ether_addr(vf->hw_lan_addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr[0], vf->vf_id,
- status);
- return status;
- }
- vf->num_mac++;
-
- ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
- * @vf: VF to add MAC filters for
- * @vsi: Pointer to VSI
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds either a VLAN 0 or port VLAN based filter after reset.
- */
-static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
-{
- struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- struct device *dev = ice_pf_to_dev(vf->pf);
- int err;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
- if (err) {
- dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
- vf->vf_id, err);
- return err;
- }
-
- err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
- } else {
- err = ice_vsi_add_vlan_zero(vsi);
- }
-
- if (err) {
- dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
- ice_vf_is_port_vlan_ena(vf) ?
- ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
- return err;
- }
-
- err = vlan_ops->ena_rx_filtering(vsi);
- if (err)
- dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
- vf->vf_id, vsi->idx, err);
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
- * @vf: VF to re-apply the configuration for
- *
- * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
- * needs to re-apply the host configured Tx rate limiting configuration.
- */
-static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- int err;
-
- if (WARN_ON(!vsi))
- return -EINVAL;
-
- if (vf->min_tx_rate) {
- err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
- vf->min_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- if (vf->max_tx_rate) {
- err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
- vf->max_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
- * @vsi: Pointer to VSI
- *
- * This function moves VSI into corresponding scheduler aggregator node
- * based on cached value of "aggregator node info" per VSI
- */
-static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int status;
-
- if (!vsi->agg_node)
- return;
-
- dev = ice_pf_to_dev(pf);
- if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
- dev_dbg(dev,
- "agg_id %u already has reached max_num_vsis %u\n",
- vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
- return;
- }
-
- status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
- vsi->idx, vsi->tc_cfg.ena_tc);
- if (status)
- dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
- vsi->idx, vsi->agg_node->agg_id);
- else
- vsi->agg_node->num_vsis++;
-}
-
-/**
- * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
- * @vf: VF to rebuild host configuration on
- */
-void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- ice_vf_set_host_trust_cfg(vf);
-
- if (ice_vf_rebuild_host_mac_cfg(vf))
- dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
- dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_tx_rate_cfg(vf))
- dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
- dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
- vf->vf_id);
-
- /* rebuild aggregator node config for main VF VSI */
- ice_vf_rebuild_aggregator_node_cfg(vsi);
-}
-
-/**
* ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
* @vf: VF that control VSI is being invalidated on
*/
@@ -1293,23 +1307,6 @@ void ice_vf_vsi_release(struct ice_vf *vf)
}
/**
- * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
- * @vf: VF to set in initialized state
- *
- * After this function the VF will be ready to receive/handle the
- * VIRTCHNL_OP_GET_VF_RESOURCES message
- */
-void ice_vf_set_initialized(struct ice_vf *vf)
-{
- ice_set_vf_state_qs_dis(vf);
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
- memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
-}
-
-/**
* ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
* @pf: the PF private structure
* @vsi: pointer to the VSI
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 6f3293b793b5..0c7e77c0a09f 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -32,13 +32,11 @@ int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
bool ice_is_vf_trusted(struct ice_vf *vf);
bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
bool ice_is_vf_link_up(struct ice_vf *vf);
-void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);
void ice_vf_invalidate_vsi(struct ice_vf *vf);
void ice_vf_vsi_release(struct ice_vf *vf);
-void ice_vf_set_initialized(struct ice_vf *vf);
#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index b1ffb81893d4..d7b10dc67f03 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -21,6 +21,99 @@ noop_vlan(struct ice_vsi __always_unused *vsi)
return 0;
}
+static void ice_port_vlan_on(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* setup outer VLAN ops */
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
+ }
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+}
+
+static void ice_port_vlan_off(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+ } else {
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ }
+
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+}
+
+/**
+ * ice_vf_vsi_enable_port_vlan - Set VSI VLAN ops to support port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * This function does not create the port VLAN itself; it only sets up the
+ * VLAN ops so that a port VLAN can be created on the VF VSI.
+ */
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi)
+{
+ if (WARN_ON_ONCE(!vsi->vf))
+ return;
+
+ ice_port_vlan_on(vsi);
+}
+
+/**
+ * ice_vf_vsi_disable_port_vlan - Clear VSI support for creating port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * This function should be called after the port VLAN has been removed from
+ * the VSI (using VLAN ops).
+ */
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi)
+{
+ if (WARN_ON_ONCE(!vsi->vf))
+ return;
+
+ ice_port_vlan_off(vsi);
+}
+
/**
* ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
* @vsi: VF's VSI being configured
@@ -39,91 +132,18 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
if (WARN_ON(!vf))
return;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
+ if (ice_vf_is_port_vlan_ena(vf))
+ ice_port_vlan_on(vsi);
+ else
+ ice_port_vlan_off(vsi);
- /* outer VLAN ops regardless of port VLAN config */
- vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
- vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- /* all Rx traffic should be in the domain of the
- * assigned port VLAN, so prevent disabling Rx VLAN
- * filtering
- */
- vlan_ops->dis_rx_filtering = noop_vlan;
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
- vlan_ops->add_vlan = noop_vlan_arg;
- vlan_ops->del_vlan = noop_vlan_arg;
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops->dis_rx_filtering =
- ice_vsi_dis_rx_vlan_filtering;
-
- if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
- vlan_ops->ena_rx_filtering = noop_vlan;
- else
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- vlan_ops->del_vlan = ice_vsi_del_vlan;
- vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
-
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
-
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- }
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops = ice_is_dvm_ena(&pf->hw) ?
+ &vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
- /* inner VLAN ops regardless of port VLAN config */
- vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
- vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
- vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
- /* all Rx traffic should be in the domain of the
- * assigned port VLAN, so prevent disabling Rx VLAN
- * filtering
- */
- vlan_ops->dis_rx_filtering = noop_vlan;
- } else {
- vlan_ops->dis_rx_filtering =
- ice_vsi_dis_rx_vlan_filtering;
- if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
- vlan_ops->ena_rx_filtering = noop_vlan;
- else
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- vlan_ops->del_vlan = ice_vsi_del_vlan;
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- }
- }
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
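A stand-alone sketch of the pattern the rewritten ice_vf_vsi_init_vlan_ops() relies on (illustrative names, not the driver API): pick the outer or inner ops table once, based on whether double VLAN mode is enabled, then assign the ops common to both modes at a single site instead of duplicating them in each branch.

#include <stdbool.h>
#include <stdio.h>

/* illustrative ops table; names do not match the driver's ice_vsi_vlan_ops */
struct vlan_ops_sketch {
	int (*add_vlan)(void);
	int (*ena_tx_filtering)(void);
	int (*dis_tx_filtering)(void);
};

static int generic_add(void)    { puts("add_vlan");      return 0; }
static int generic_ena_tx(void) { puts("ena_tx_filter"); return 0; }
static int generic_dis_tx(void) { puts("dis_tx_filter"); return 0; }

struct vsi_sketch {
	struct vlan_ops_sketch inner_ops;
	struct vlan_ops_sketch outer_ops;
};

/* common ops always land on the "active" table: outer when double VLAN
 * mode is enabled, inner otherwise, so there is one assignment site.
 */
static void init_vlan_ops(struct vsi_sketch *vsi, bool dvm_ena)
{
	struct vlan_ops_sketch *ops = dvm_ena ? &vsi->outer_ops : &vsi->inner_ops;

	ops->add_vlan = generic_add;
	ops->ena_tx_filtering = generic_ena_tx;
	ops->dis_tx_filtering = generic_dis_tx;
}

int main(void)
{
	struct vsi_sketch vsi = { 0 };

	init_vlan_ops(&vsi, true);
	return vsi.outer_ops.add_vlan();
}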
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
index 875a4e615f39..df8aa09df3e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
@@ -13,7 +13,11 @@ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
#ifdef CONFIG_PCI_IOV
void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi);
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi);
#else
static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) { }
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index dcf628b1fccd..b03426ac932b 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -428,7 +428,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
goto err;
}
- len = sizeof(struct virtchnl_vf_resource);
+ len = virtchnl_struct_size(vfres, vsi_res, 0);
vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
@@ -500,7 +500,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_queue_pairs = vsi->num_txq;
vfres->max_vectors = vf->pf->vfs.num_msix_per;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
- vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
@@ -962,7 +962,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -978,7 +978,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
@@ -1724,6 +1724,8 @@ error_param:
vf->vf_id, i);
}
+ ice_lag_move_new_vf_nodes(vf);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 5b4a0abb4607..76266e709a39 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -202,6 +202,24 @@ int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
return ice_vsi_manage_vlan_insertion(vsi);
}
+static void
+ice_save_vlan_info(struct ice_aqc_vsi_props *info,
+ struct ice_vsi_vlan_info *vlan)
+{
+ vlan->sw_flags2 = info->sw_flags2;
+ vlan->inner_vlan_flags = info->inner_vlan_flags;
+ vlan->outer_vlan_flags = info->outer_vlan_flags;
+}
+
+static void
+ice_restore_vlan_info(struct ice_aqc_vsi_props *info,
+ struct ice_vsi_vlan_info *vlan)
+{
+ info->sw_flags2 = vlan->sw_flags2;
+ info->inner_vlan_flags = vlan->inner_vlan_flags;
+ info->outer_vlan_flags = vlan->outer_vlan_flags;
+}
+
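A small sketch, with invented struct names, of the save/restore pairing introduced by ice_save_vlan_info()/ice_restore_vlan_info(): snapshot the three VLAN-related fields before the port-VLAN update overwrites them, then copy the snapshot back when the port VLAN is cleared.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct vsi_props_sketch {          /* stand-in for ice_aqc_vsi_props */
	uint8_t sw_flags2;
	uint8_t inner_vlan_flags;
	uint8_t outer_vlan_flags;
};

struct vlan_info_sketch {          /* stand-in for ice_vsi_vlan_info */
	uint8_t sw_flags2;
	uint8_t inner_vlan_flags;
	uint8_t outer_vlan_flags;
};

static void save_vlan_info(const struct vsi_props_sketch *info,
			   struct vlan_info_sketch *vlan)
{
	vlan->sw_flags2 = info->sw_flags2;
	vlan->inner_vlan_flags = info->inner_vlan_flags;
	vlan->outer_vlan_flags = info->outer_vlan_flags;
}

static void restore_vlan_info(struct vsi_props_sketch *info,
			      const struct vlan_info_sketch *vlan)
{
	info->sw_flags2 = vlan->sw_flags2;
	info->inner_vlan_flags = vlan->inner_vlan_flags;
	info->outer_vlan_flags = vlan->outer_vlan_flags;
}

int main(void)
{
	struct vsi_props_sketch props = { 0x01, 0x02, 0x03 };
	struct vlan_info_sketch saved;

	save_vlan_info(&props, &saved);    /* before setting the port VLAN */
	props.inner_vlan_flags = 0xff;     /* port VLAN config rewrites the flags */
	restore_vlan_info(&props, &saved); /* clearing the port VLAN puts them back */
	assert(props.inner_vlan_flags == 0x02);
	printf("restored inner_vlan_flags=%#x\n", props.inner_vlan_flags);
	return 0;
}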
/**
* __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
* @vsi: the VSI to update
@@ -218,6 +236,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
if (!ctxt)
return -ENOMEM;
+ ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
ctxt->info = vsi->info;
info = &ctxt->info;
info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
@@ -259,6 +278,33 @@ int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
}
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
+ struct ice_vsi_ctx *ctxt;
+ int ret;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+ vsi->info.port_based_inner_vlan = 0;
+ ctxt->info = vsi->info;
+ info = &ctxt->info;
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret)
+ dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+
+ kfree(ctxt);
+ return ret;
+}
+
/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
@@ -647,6 +693,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
if (!ctxt)
return -ENOMEM;
+ ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
ctxt->info = vsi->info;
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
@@ -689,9 +736,6 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
* used if DVM is supported. Also, this function should never be called directly
* as it should be part of ice_vsi_vlan_ops if it's needed.
*
- * This function does not support clearing the port VLAN as there is currently
- * no use case for this.
- *
* Use the ice_vlan structure passed in to set this VSI in a port VLAN.
*/
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
@@ -705,3 +749,37 @@ int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
}
+
+/**
+ * ice_vsi_clear_outer_port_vlan - clear the outer port VLAN
+ * @vsi: VSI to configure
+ *
+ * Restore the VLAN configuration that was saved in vsi->vlan_info when the
+ * outer port VLAN was set, and clear the outer port VLAN from the VSI.
+ */
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+ vsi->info.port_based_outer_vlan = 0;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+
+ kfree(ctxt);
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
index f459909490ec..f0d84d11bd5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -7,6 +7,12 @@
#include <linux/types.h>
#include "ice_vlan.h"
+struct ice_vsi_vlan_info {
+ u8 sw_flags2;
+ u8 inner_vlan_flags;
+ u8 outer_vlan_flags;
+};
+
struct ice_vsi;
int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
@@ -17,6 +23,7 @@ int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi);
int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
@@ -28,5 +35,6 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi);
#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
index 5b47568f6256..b2d2330dedcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
@@ -21,6 +21,7 @@ struct ice_vsi_vlan_ops {
int (*ena_tx_filtering)(struct ice_vsi *vsi);
int (*dis_tx_filtering)(struct ice_vsi *vsi);
int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*clear_port_vlan)(struct ice_vsi *vsi);
};
void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a7fe2b4ce655..2a3f0834e139 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -546,19 +546,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
}
/**
- * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
- * @rx_ring: Rx ring
- */
-static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
-{
- int ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(ICE_RX_DESC(rx_ring, ntc));
-}
-
-/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
* @xdp: Pointer to XDP buffer
@@ -572,8 +559,14 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo = NULL;
struct sk_buff *skb;
+ u32 nr_frags = 0;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
net_prefetch(xdp->data_meta);
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
@@ -589,6 +582,29 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
__skb_pull(skb, metasize);
}
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ for (int i = 0; i < nr_frags; i++) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct page *page;
+ void *addr;
+
+ page = dev_alloc_page();
+ if (!page) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ addr = page_to_virt(page);
+
+ memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));
+
+ __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
+ addr, 0, skb_frag_size(frag));
+ }
+
+out:
xsk_buff_free(xdp);
return skb;
}
@@ -597,7 +613,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
*/
-static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
@@ -619,7 +635,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
}
if (!completed_frames)
- return;
+ return 0;
if (likely(!xdp_ring->xdp_tx_active)) {
xsk_frames = completed_frames;
@@ -649,6 +665,8 @@ skip:
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+
+ return completed_frames;
}
/**
@@ -666,37 +684,72 @@ skip:
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
struct ice_tx_ring *xdp_ring)
{
+ struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
u32 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
- dma_addr_t dma;
-
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) {
- ice_clean_xdp_irq_zc(xdp_ring);
- if (!ICE_DESC_UNUSED(xdp_ring)) {
- xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ struct xdp_buff *head;
+ u32 nr_frags = 0;
+ u32 free_space;
+ u32 frag = 0;
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
+ free_space += ice_clean_xdp_irq_zc(xdp_ring);
+
+ if (unlikely(!free_space))
+ goto busy;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ if (free_space < nr_frags + 1)
+ goto busy;
}
- dma = xsk_buff_xdp_get_dma(xdp);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
-
- tx_buf = &xdp_ring->tx_buf[ntu];
- tx_buf->xdp = xdp;
- tx_buf->type = ICE_TX_BUF_XSK_TX;
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
- 0, size, 0);
- xdp_ring->xdp_tx_active++;
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ head = xdp;
+
+ for (;;) {
+ dma_addr_t dma;
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+
+ tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
+ /* account for each xdp_buff from xsk_buff_pool */
+ xdp_ring->xdp_tx_active++;
+
+ if (++ntu == xdp_ring->count)
+ ntu = 0;
+
+ if (frag == nr_frags)
+ break;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_buf = &xdp_ring->tx_buf[ntu];
+
+ xdp = xsk_buff_get_frag(head);
+ size = skb_frag_size(&sinfo->frags[frag]);
+ frag++;
+ }
- if (++ntu == xdp_ring->count)
- ntu = 0;
xdp_ring->next_to_use = ntu;
+ /* mark the last descriptor of the frame with EOP */
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
return ICE_XDP_TX;
+
+busy:
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+
+ return ICE_XDP_CONSUMED;
}
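A user-space sketch of the descriptor loop the reworked ice_xmit_xdp_tx_zc() uses for multi-buffer frames (ring size, flag value, and names are made up; the free-descriptor accounting and DMA handling are omitted): one descriptor per buffer, next_to_use wraps at the ring size, and only the final descriptor of the frame is marked EOP so hardware knows where the frame ends.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8
#define CMD_EOP   (1u << 0)   /* illustrative EOP flag, not the hardware encoding */

struct tx_desc_sketch {
	uint32_t len;
	uint32_t cmd;
};

struct tx_ring_sketch {
	struct tx_desc_sketch desc[RING_SIZE];
	uint32_t next_to_use;
};

/* Post one frame made of nr_bufs buffers; EOP lands on the last descriptor. */
static void xmit_frame(struct tx_ring_sketch *ring, const uint32_t *sizes,
		       unsigned int nr_bufs)
{
	struct tx_desc_sketch *desc = NULL;

	for (unsigned int i = 0; i < nr_bufs; i++) {
		desc = &ring->desc[ring->next_to_use];
		desc->len = sizes[i];
		desc->cmd = 0;

		if (++ring->next_to_use == RING_SIZE)
			ring->next_to_use = 0;
	}

	if (desc)
		desc->cmd |= CMD_EOP;
}

int main(void)
{
	struct tx_ring_sketch ring = { .next_to_use = 6 };
	uint32_t sizes[] = { 1500, 800, 200 };

	xmit_frame(&ring, sizes, 3);
	for (int i = 0; i < RING_SIZE; i++)
		printf("desc[%d] len=%u eop=%u\n", i, ring.desc[i].len,
		       ring.desc[i].cmd & CMD_EOP);
	return 0;
}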
/**
@@ -752,6 +805,34 @@ out_failure:
return result;
}
+static int
+ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
+ struct xdp_buff *xdp, const unsigned int size)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
+
+ if (!size)
+ return 0;
+
+ if (!xdp_buff_has_frags(first)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(first);
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ xsk_buff_free(first);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+ virt_to_page(xdp->data_hard_start), 0, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+ return 0;
+}
+
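A sketch of the accounting done by ice_add_xsk_frag() (illustrative names; the real code also frees the whole frame when the frag table overflows, while this sketch only models the counting): zero-length completions are ignored, fragments are appended until the table is full, and the running fragment size is tracked.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_FRAGS_SKETCH 4   /* stand-in for MAX_SKB_FRAGS */

struct frag_ctx_sketch {
	unsigned int nr_frags;
	size_t frags_size;
	size_t frag_len[MAX_FRAGS_SKETCH];
};

/* returns 0 on success, -ENOMEM when the frag table is already full */
static int add_frag(struct frag_ctx_sketch *ctx, size_t size)
{
	if (!size)
		return 0;               /* zero-length completions are ignored */

	if (ctx->nr_frags == MAX_FRAGS_SKETCH)
		return -ENOMEM;         /* caller drops the whole frame */

	ctx->frag_len[ctx->nr_frags++] = size;
	ctx->frags_size += size;
	return 0;
}

int main(void)
{
	struct frag_ctx_sketch ctx = { 0 };
	size_t sizes[] = { 2048, 2048, 1024, 512, 100 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (add_frag(&ctx, sizes[i]))
			printf("frag %zu rejected, table full\n", i);

	printf("nr_frags=%u total=%zu\n", ctx.nr_frags, ctx.frags_size);
	return 0;
}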
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -762,9 +843,14 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 ntu = rx_ring->next_to_use;
+ struct xdp_buff *first = NULL;
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
+ u32 cnt = rx_ring->count;
bool failure = false;
int entries_to_alloc;
@@ -774,6 +860,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
xdp_ring = rx_ring->xdp_ring;
+ if (ntc != rx_ring->first_desc)
+ first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
@@ -783,7 +872,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
u16 vlan_tag = 0;
u16 rx_ptype;
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
@@ -795,51 +884,61 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
- if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
+ if (unlikely(ntc == ntu))
break;
- xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+ xdp = *ice_xdp_buf(rx_ring, ntc);
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size) {
- xdp->data = NULL;
- xdp->data_end = NULL;
- xdp->data_hard_start = NULL;
- xdp->data_meta = NULL;
- goto construct_skb;
- }
xsk_buff_set_size(xdp, size);
- xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
+
+ if (!first) {
+ first = xdp;
+ xdp_buff_clear_frags_flag(first);
+ } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ break;
+ }
+
+ if (++ntc == cnt)
+ ntc = 0;
- xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
} else if (xdp_res == ICE_XDP_EXIT) {
failure = true;
+ first = NULL;
+ rx_ring->first_desc = ntc;
break;
} else if (xdp_res == ICE_XDP_CONSUMED) {
- xsk_buff_free(xdp);
+ xsk_buff_free(first);
} else if (xdp_res == ICE_XDP_PASS) {
goto construct_skb;
}
- total_rx_bytes += size;
+ total_rx_bytes += xdp_get_buff_len(first);
total_rx_packets++;
- ice_bump_ntc(rx_ring);
+ first = NULL;
+ rx_ring->first_desc = ntc;
continue;
construct_skb:
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, xdp);
+ skb = ice_construct_skb_zc(rx_ring, first);
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
break;
}
- ice_bump_ntc(rx_ring);
+ first = NULL;
+ rx_ring->first_desc = ntc;
if (eth_skb_pad(skb)) {
skb = NULL;
@@ -858,18 +957,22 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ rx_ring->next_to_clean = ntc;
+ entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
- if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ /* ntu could have changed when allocating entries above, so
+ * use rx_ring value instead of stack based one
+ */
+ if (failure || ntc == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
+ xsk_clear_rx_need_wakeup(xsk_pool);
return (int)total_rx_packets;
}
@@ -894,7 +997,7 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
0, desc->len, 0);
*total_bytes += desc->len;
@@ -921,7 +1024,7 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
0, descs[i].len, 0);
*total_bytes += descs[i].len;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 08e3df37089f..1ab787ed254d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2615,10 +2615,10 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
struct netlink_ext_ack *extack = f->common.extack;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 38901d2a4680..8ebe6999a528 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -15,6 +15,7 @@
#include <linux/net_tstamp.h>
#include <linux/bitfield.h>
#include <linux/hrtimer.h>
+#include <net/xdp.h>
#include "igc_hw.h"
@@ -37,6 +38,8 @@ void igc_ethtool_set_ops(struct net_device *);
#define MAX_FLEX_FILTER 32
+#define IGC_MAX_TX_TSTAMP_REGS 4
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -69,6 +72,15 @@ struct igc_rx_packet_stats {
u64 other_packets;
};
+struct igc_tx_timestamp_request {
+ struct sk_buff *skb; /* reference to the packet being timestamped */
+ unsigned long start; /* when the tstamp request started (jiffies) */
+ u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */
+ u32 regl; /* which TXSTMPL_{X} register should be used */
+ u32 regh; /* which TXSTMPH_{X} register should be used */
+ u32 flags; /* flags that should be added to the tx_buffer */
+};
+
struct igc_ring_container {
struct igc_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -244,9 +256,8 @@ struct igc_adapter {
* ptp_tx_lock.
*/
spinlock_t ptp_tx_lock;
- struct sk_buff *ptp_tx_skb;
+ struct igc_tx_timestamp_request tx_tstamp[IGC_MAX_TX_TSTAMP_REGS];
struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
@@ -454,6 +465,10 @@ enum igc_tx_flags {
/* olinfo flags */
IGC_TX_FLAGS_IPV4 = 0x10,
IGC_TX_FLAGS_CSUM = 0x20,
+
+ IGC_TX_FLAGS_TSTAMP_1 = 0x100,
+ IGC_TX_FLAGS_TSTAMP_2 = 0x200,
+ IGC_TX_FLAGS_TSTAMP_3 = 0x400,
};
enum igc_boards {
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 9f3827eda157..f7d6491d4c60 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -34,6 +34,9 @@ struct igc_adv_tx_context_desc {
/* Adv Transmit Descriptor Config Masks */
#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
+#define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */
+#define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */
+#define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */
#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 2f780cc90883..b3037016f31d 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -454,6 +454,9 @@
/* Time Sync Transmit Control bit definitions */
#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */
+#define IGC_TSYNCTXCTL_TXTT_1 0x00000002 /* Tx timestamp reg 1 valid */
+#define IGC_TSYNCTXCTL_TXTT_2 0x00000004 /* Tx timestamp reg 2 valid */
+#define IGC_TSYNCTXCTL_TXTT_3 0x00000008 /* Tx timestamp reg 3 valid */
#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
@@ -461,6 +464,10 @@
#define IGC_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
#define IGC_TSYNCTXCTL_TXSYNSIG 0x00000020 /* Sample TX tstamp in PHY sop */
+#define IGC_TSYNCTXCTL_TXTT_ANY ( \
+ IGC_TSYNCTXCTL_TXTT_0 | IGC_TSYNCTXCTL_TXTT_1 | \
+ IGC_TSYNCTXCTL_TXTT_2 | IGC_TSYNCTXCTL_TXTT_3)
+
/* Timer selection bits */
#define IGC_AUX_IO_TIMER_SEL_SYSTIM0 (0u << 30) /* Select SYSTIM0 for auxiliary time stamp */
#define IGC_AUX_IO_TIMER_SEL_SYSTIM1 (1u << 30) /* Select SYSTIM1 for auxiliary time stamp */
@@ -549,7 +556,7 @@
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2)
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
-#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */
+#define IGC_PTM_SHORT_CYC_DEFAULT 1 /* Default short cycle interval */
#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */
#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 6f557e843e49..293b45717683 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1271,10 +1271,21 @@ static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
(IGC_ADVTXD_DCMD_TSE));
- /* set timestamp bit if present */
+ /* set the timestamp bit if present; the register set is selected
+ * based on the _TSTAMP(_X) flag.
+ */
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
(IGC_ADVTXD_MAC_TSTAMP));
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1,
+ (IGC_ADVTXD_TSTAMP_REG_1));
+
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2,
+ (IGC_ADVTXD_TSTAMP_REG_2));
+
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3,
+ (IGC_ADVTXD_TSTAMP_REG_3));
+
/* insert frame checksum */
cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
@@ -1533,6 +1544,26 @@ static int igc_tso(struct igc_ring *tx_ring,
return 1;
}
+static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags)
+{
+ int i;
+
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
+
+ if (tstamp->skb)
+ continue;
+
+ tstamp->skb = skb_get(skb);
+ tstamp->start = jiffies;
+ *flags = tstamp->flags;
+
+ return true;
+ }
+
+ return false;
+}
+
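A stand-alone sketch of the slot claiming in igc_request_tx_tstamp() (names invented; an integer packet id stands in for the skb pointer): scan the fixed array of timestamp-register slots, claim the first free one by recording the packet and its start time, and hand back that slot's flag. The flag values mirror the IGC_TX_FLAGS_TSTAMP_{1,2,3} encoding added above; when all four slots are busy the request is skipped, which corresponds to the tx_hwtstamp_skipped path.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NUM_TSTAMP_SLOTS 4   /* stand-in for IGC_MAX_TX_TSTAMP_REGS */

struct tstamp_slot_sketch {
	int pkt_id;          /* 0 means free; the driver stores an skb pointer */
	time_t start;
	uint32_t flags;      /* per-slot flag handed back to the transmit path */
};

static struct tstamp_slot_sketch slots[NUM_TSTAMP_SLOTS] = {
	{ .flags = 0x000 }, { .flags = 0x100 }, { .flags = 0x200 }, { .flags = 0x400 },
};

static bool request_tstamp(int pkt_id, uint32_t *flags)
{
	for (int i = 0; i < NUM_TSTAMP_SLOTS; i++) {
		if (slots[i].pkt_id)
			continue;       /* slot already owns an in-flight request */

		slots[i].pkt_id = pkt_id;
		slots[i].start = time(NULL);
		*flags = slots[i].flags;
		return true;
	}
	return false;                   /* all four registers busy: skip timestamping */
}

int main(void)
{
	uint32_t flags;

	for (int pkt = 1; pkt <= 5; pkt++) {
		if (request_tstamp(pkt, &flags))
			printf("pkt %d got slot flags %#x\n", pkt, flags);
		else
			printf("pkt %d skipped, no free slot\n", pkt);
	}
	return 0;
}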
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1614,14 +1645,12 @@ done:
* timestamping request.
*/
unsigned long flags;
+ u32 tstamp_flags;
spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
- if (!adapter->ptp_tx_skb) {
+ if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_flags |= IGC_TX_FLAGS_TSTAMP;
-
- adapter->ptp_tx_skb = skb_get(skb);
- adapter->ptp_tx_start = jiffies;
+ tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
} else {
adapter->tx_hwtstamp_skipped++;
}
@@ -6162,6 +6191,26 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
return 0;
}
+static void igc_taprio_stats(struct net_device *dev,
+ struct tc_taprio_qopt_stats *stats)
+{
+ /* When Strict_End is enabled, the tx_overruns counter
+ * will always be zero.
+ */
+ stats->tx_overruns = 0;
+}
+
+static void igc_taprio_queue_stats(struct net_device *dev,
+ struct tc_taprio_qopt_queue_stats *queue_stats)
+{
+ struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
+
+ /* When Strict_End is enabled, the tx_overruns counter
+ * will always be zero.
+ */
+ stats->tx_overruns = 0;
+}
+
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
@@ -6173,11 +6222,20 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
size_t n;
int i;
- if (qopt->cmd == TAPRIO_CMD_DESTROY)
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ break;
+ case TAPRIO_CMD_DESTROY:
return igc_tsn_clear_schedule(adapter);
-
- if (qopt->cmd != TAPRIO_CMD_REPLACE)
+ case TAPRIO_CMD_STATS:
+ igc_taprio_stats(adapter->netdev, &qopt->stats);
+ return 0;
+ case TAPRIO_CMD_QUEUE_STATS:
+ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+ return 0;
+ default:
return -EOPNOTSUPP;
+ }
if (qopt->base_time < 0)
return -ERANGE;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index f0b979a70655..928f38792203 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -558,11 +558,16 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
{
unsigned long flags;
+ int i;
spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
+
+ dev_kfree_skb_any(tstamp->skb);
+ tstamp->skb = NULL;
+ }
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
}
@@ -659,61 +664,106 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
}
/* Requires adapter->ptp_tx_lock held by caller. */
-static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
+static void igc_ptp_tx_timeout(struct igc_adapter *adapter,
+ struct igc_tx_timestamp_request *tstamp)
{
- struct igc_hw *hw = &adapter->hw;
-
- dev_kfree_skb_any(adapter->ptp_tx_skb);
- adapter->ptp_tx_skb = NULL;
+ dev_kfree_skb_any(tstamp->skb);
+ tstamp->skb = NULL;
adapter->tx_hwtstamp_timeouts++;
- /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */
- rd32(IGC_TXSTMPH);
+
netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
}
void igc_ptp_tx_hang(struct igc_adapter *adapter)
{
+ struct igc_tx_timestamp_request *tstamp;
+ struct igc_hw *hw = &adapter->hw;
unsigned long flags;
+ bool found = false;
+ int i;
spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
- if (!adapter->ptp_tx_skb)
- goto unlock;
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ tstamp = &adapter->tx_tstamp[i];
+
+ if (!tstamp->skb)
+ continue;
- if (time_is_after_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT))
- goto unlock;
+ if (time_is_after_jiffies(tstamp->start + IGC_PTP_TX_TIMEOUT))
+ continue;
- igc_ptp_tx_timeout(adapter);
+ igc_ptp_tx_timeout(adapter, tstamp);
+ found = true;
+ }
+
+ if (found) {
+ /* Reading the high register of the first set of timestamp registers
+ * clears all the equivalent bits in the TSYNCTXCTL register.
+ */
+ rd32(IGC_TXSTMPH_0);
+ }
-unlock:
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
}
+static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
+ struct igc_tx_timestamp_request *tstamp, u64 regval)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb;
+ int adjust = 0;
+
+ skb = tstamp->skb;
+ if (!skb)
+ return;
+
+ if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
+ return;
+
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGC_I225_TX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGC_I225_TX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGC_I225_TX_LATENCY_1000;
+ break;
+ case SPEED_2500:
+ adjust = IGC_I225_TX_LATENCY_2500;
+ break;
+ }
+
+ shhwtstamps.hwtstamp =
+ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
+
+ tstamp->skb = NULL;
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
/**
* igc_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: Board private structure
*
- * If we were asked to do hardware stamping and such a time stamp is
- * available, then it must have been for this skb here because we only
- * allow only one such packet into the queue.
+ * Check the ready mask to see which of the timestamp register sets have
+ * a captured timestamp, then retrieve those timestamps and notify the
+ * rest of the stack.
*
* Context: Expects adapter->ptp_tx_lock to be held by caller.
*/
static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
{
- struct sk_buff *skb = adapter->ptp_tx_skb;
- struct skb_shared_hwtstamps shhwtstamps;
struct igc_hw *hw = &adapter->hw;
- u32 tsynctxctl;
- int adjust = 0;
u64 regval;
+ u32 mask;
+ int i;
- if (WARN_ON_ONCE(!skb))
- return;
-
- tsynctxctl = rd32(IGC_TSYNCTXCTL);
- tsynctxctl &= IGC_TSYNCTXCTL_TXTT_0;
- if (tsynctxctl) {
+ mask = rd32(IGC_TSYNCTXCTL) & IGC_TSYNCTXCTL_TXTT_ANY;
+ if (mask & IGC_TSYNCTXCTL_TXTT_0) {
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
} else {
@@ -742,37 +792,30 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
txstmpl_new = rd32(IGC_TXSTMPL);
if (txstmpl_old == txstmpl_new)
- return;
+ goto done;
regval = txstmpl_new;
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
}
- if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
- return;
- switch (adapter->link_speed) {
- case SPEED_10:
- adjust = IGC_I225_TX_LATENCY_10;
- break;
- case SPEED_100:
- adjust = IGC_I225_TX_LATENCY_100;
- break;
- case SPEED_1000:
- adjust = IGC_I225_TX_LATENCY_1000;
- break;
- case SPEED_2500:
- adjust = IGC_I225_TX_LATENCY_2500;
- break;
- }
+ igc_ptp_tx_reg_to_stamp(adapter, &adapter->tx_tstamp[0], regval);
- shhwtstamps.hwtstamp =
- ktime_add_ns(shhwtstamps.hwtstamp, adjust);
+done:
+ /* Now that the problematic first register has been handled, the
+ * timestamps from the remaining registers (starting from '1') can be
+ * retrieved with fewer complications.
+ */
+ for (i = 1; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
- adapter->ptp_tx_skb = NULL;
+ if (!(tstamp->mask & mask))
+ continue;
- /* Notify the stack and free the skb after we've unlocked */
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ regval = rd32(tstamp->regl);
+ regval |= (u64)rd32(tstamp->regh) << 32;
+
+ igc_ptp_tx_reg_to_stamp(adapter, tstamp, regval);
+ }
}
/**
@@ -788,12 +831,8 @@ void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter)
spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
- if (!adapter->ptp_tx_skb)
- goto unlock;
-
igc_ptp_tx_hwtstamp(adapter);
-unlock:
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
}
@@ -1006,9 +1045,34 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
void igc_ptp_init(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct igc_tx_timestamp_request *tstamp;
struct igc_hw *hw = &adapter->hw;
int i;
+ tstamp = &adapter->tx_tstamp[0];
+ tstamp->mask = IGC_TSYNCTXCTL_TXTT_0;
+ tstamp->regl = IGC_TXSTMPL_0;
+ tstamp->regh = IGC_TXSTMPH_0;
+ tstamp->flags = 0;
+
+ tstamp = &adapter->tx_tstamp[1];
+ tstamp->mask = IGC_TSYNCTXCTL_TXTT_1;
+ tstamp->regl = IGC_TXSTMPL_1;
+ tstamp->regh = IGC_TXSTMPH_1;
+ tstamp->flags = IGC_TX_FLAGS_TSTAMP_1;
+
+ tstamp = &adapter->tx_tstamp[2];
+ tstamp->mask = IGC_TSYNCTXCTL_TXTT_2;
+ tstamp->regl = IGC_TXSTMPL_2;
+ tstamp->regh = IGC_TXSTMPH_2;
+ tstamp->flags = IGC_TX_FLAGS_TSTAMP_2;
+
+ tstamp = &adapter->tx_tstamp[3];
+ tstamp->mask = IGC_TSYNCTXCTL_TXTT_3;
+ tstamp->regl = IGC_TXSTMPL_3;
+ tstamp->regh = IGC_TXSTMPH_3;
+ tstamp->flags = IGC_TX_FLAGS_TSTAMP_3;
+
switch (hw->mac.type) {
case igc_i225:
for (i = 0; i < IGC_N_SDP; i++) {
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index dba5a5759b1c..20e17f5fbce3 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -243,6 +243,18 @@
#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+/* TX Timestamp Low */
+#define IGC_TXSTMPL_0 0x0B618
+#define IGC_TXSTMPL_1 0x0B698
+#define IGC_TXSTMPL_2 0x0B6B8
+#define IGC_TXSTMPL_3 0x0B6D8
+
+/* TX Timestamp High */
+#define IGC_TXSTMPH_0 0x0B61C
+#define IGC_TXSTMPH_1 0x0B69C
+#define IGC_TXSTMPH_2 0x0B6BC
+#define IGC_TXSTMPH_3 0x0B6DC
+
#define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
#define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 63d4e32df029..b6f0376e42f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -945,8 +945,6 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
struct ixgbe_ring *);
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
- struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
@@ -997,10 +995,6 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
-#ifdef CONFIG_IXGBE_DCB
-u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
-#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 4b531e8ae38a..34761e691d52 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -8,7 +8,6 @@
#include "ixgbe.h"
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8eb9839a3ca6..dd03b017dfc5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10042,9 +10042,6 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
status = ixgbe_configure_bridge_mode(adapter, mode);
if (status)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 149c733fcc2b..130cb868774c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -486,9 +486,6 @@ static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
{ return 0; }
#endif /* CONFIG_IXGBEVF_IPSEC */
-void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
-
#define ixgbevf_hw_to_netdev(hw) \
(((struct ixgbevf_adapter *)(hw)->back)->netdev)
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 8537578e1cf1..5f6ae11212ae 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -43,7 +43,7 @@
#include <linux/ioport.h>
#include <linux/iopoll.h>
#include <linux/in.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 8662543ca5c8..674913184ebf 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -24,8 +24,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -269,7 +269,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
struct orion_mdio_dev *dev;
int i, ret;
- type = (enum orion_mdio_bus_type)device_get_match_data(&pdev->dev);
+ type = (uintptr_t)device_get_match_data(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index acf4f6ba73a6..d483b8c00ec0 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -37,7 +37,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 11e603686a27..e809f91c08fb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -16,7 +16,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/flow_offload.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
#include <linux/bpf.h>
#include <net/xdp.h>
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 75e83ea2a926..0f9bc4f8ec3b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -593,8 +593,6 @@ static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
sprintf(c2_entry_name, "%03d", id);
c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
- if (!c2_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->c2_entries[id];
@@ -626,8 +624,6 @@ static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
sprintf(flow_tbl_entry_name, "%03d", id);
flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
- if (!flow_tbl_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->flt_entries[id];
@@ -646,12 +642,8 @@ static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
cls_dir = debugfs_create_dir("classifier", parent);
- if (!cls_dir)
- return -ENOMEM;
c2_dir = debugfs_create_dir("c2", cls_dir);
- if (!c2_dir)
- return -ENOMEM;
for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
@@ -660,8 +652,6 @@ static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
}
flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
- if (!flow_tbl_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 1fec84b4c068..eb74ccddb440 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -24,7 +24,6 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
@@ -36,6 +35,7 @@
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h
new file mode 100644
index 000000000000..0c741e752db6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+#ifndef __OCTEP_CP_VERSION_H__
+#define __OCTEP_CP_VERSION_H__
+
+#define OCTEP_CP_VERSION(a, b, c) ((((a) & 0xff) << 16) + \
+ (((b) & 0xff) << 8) + \
+ ((c) & 0xff))
+
+#endif /* __OCTEP_CP_VERSION_H__ */
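A quick stand-alone illustration of the version packing OCTEP_CP_VERSION() performs and of the 64-bit min/max word octep_ctrl_mbox_init() reads back (the macro body is copied from the header above; everything else is illustrative): major, minor, and patch occupy one byte each, so packed versions compare with ordinary integer operators, and the firmware word carries the minimum supported version in the upper 32 bits and the maximum in the lower 32 bits.

#include <stdint.h>
#include <stdio.h>

#define CP_VERSION(a, b, c) ((((a) & 0xff) << 16) + (((b) & 0xff) << 8) + ((c) & 0xff))

int main(void)
{
	uint32_t v100 = CP_VERSION(1, 0, 0);
	uint32_t v123 = CP_VERSION(1, 2, 3);

	/* packed versions compare numerically */
	printf("1.0.0 -> %#x, 1.2.3 -> %#x, newer: %d\n", v100, v123, v123 > v100);

	/* min/max firmware versions packed into one 64-bit register image */
	uint64_t fw_versions = ((uint64_t)v100 << 32) | v123;
	uint32_t min_fw = fw_versions >> 32;
	uint32_t max_fw = fw_versions & 0xffffffff;

	printf("min %u.%u.%u max %u.%u.%u\n",
	       (min_fw >> 16) & 0xff, (min_fw >> 8) & 0xff, min_fw & 0xff,
	       (max_fw >> 16) & 0xff, (max_fw >> 8) & 0xff, max_fw & 0xff);
	return 0;
}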
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
index dab61cc1acb5..9d53c1402cb4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -37,7 +37,9 @@
#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m)
#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_VERSION(m) ((m) + 16)
#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_VERSION(m) ((m) + 136)
#define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144)
#define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
@@ -71,7 +73,7 @@ static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz)
int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
{
- u64 magic_num, status;
+ u64 magic_num, status, fw_versions;
if (!mbox)
return -EINVAL;
@@ -93,6 +95,9 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
return -EINVAL;
}
+ fw_versions = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION(mbox->barmem));
+ mbox->min_fw_version = ((fw_versions & 0xffffffff00000000ull) >> 32);
+ mbox->max_fw_version = (fw_versions & 0xffffffff);
mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem));
writeq(OCTEP_CTRL_MBOX_STATUS_INIT,
@@ -113,6 +118,7 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
OCTEP_CTRL_MBOX_TOTAL_INFO_SZ +
mbox->h2fq.sz;
+ writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem));
/* ensure ready state is seen after everything is initialized */
wmb();
writeq(OCTEP_CTRL_MBOX_STATUS_READY,
@@ -258,6 +264,7 @@ int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
if (!mbox->barmem)
return -EINVAL;
+ writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem));
writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
/* ensure uninit state is written before uninitialization */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
index 9c4ff0fba6a0..7f8135788efc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -121,6 +121,8 @@ struct octep_ctrl_mbox_q {
};
struct octep_ctrl_mbox {
+ /* control plane version */
+ u64 version;
/* size of bar memory */
u32 barmem_sz;
/* pointer to BAR memory */
@@ -133,6 +135,10 @@ struct octep_ctrl_mbox {
struct mutex h2fq_lock;
/* lock for f2hq */
struct mutex f2hq_lock;
+ /* Min control plane version supported by firmware */
+ u32 min_fw_version;
+ /* Max control plane version supported by firmware */
+ u32 max_fw_version;
};
/* Initialize control mbox.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 565320ec24f8..17bfd5cdf462 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -14,6 +14,9 @@
#include "octep_main.h"
#include "octep_ctrl_net.h"
+/* Control plane version */
+#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0)
+
static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr);
static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
@@ -21,6 +24,18 @@ static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
static atomic_t ctrl_net_msg_id;
+/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
+static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] =
+ OCTEP_CP_VERSION(1, 0, 0)
+};
+
+/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */
+static const u32 octep_ctrl_net_f2h_cmd_versions[OCTEP_CTRL_NET_F2H_CMD_MAX] = {
+ [OCTEP_CTRL_NET_F2H_CMD_INVALID ... OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS] =
+ OCTEP_CP_VERSION(1, 0, 0)
+};
+
static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf,
u16 sz, int vfid)
{
@@ -41,7 +56,13 @@ static int octep_send_mbox_req(struct octep_device *oct,
struct octep_ctrl_net_wait_data *d,
bool wait_for_response)
{
- int err, ret;
+ int err, ret, cmd;
+
+ /* check if firmware is compatible for this request */
+ cmd = d->data.req.hdr.s.cmd;
+ if (octep_ctrl_net_h2f_cmd_versions[cmd] > oct->ctrl_mbox.max_fw_version ||
+ octep_ctrl_net_h2f_cmd_versions[cmd] < oct->ctrl_mbox.min_fw_version)
+ return -EOPNOTSUPP;
err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg);
if (err < 0)
@@ -84,12 +105,16 @@ int octep_ctrl_net_init(struct octep_device *oct)
/* Initialize control mbox */
ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->version = OCTEP_CP_VERSION_CURRENT;
ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
ret = octep_ctrl_mbox_init(ctrl_mbox);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize control mbox\n");
return ret;
}
+ dev_info(&pdev->dev, "Control plane versions host: %llx, firmware: %x:%x\n",
+ ctrl_mbox->version, ctrl_mbox->min_fw_version,
+ ctrl_mbox->max_fw_version);
oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz;
return 0;
@@ -273,9 +298,17 @@ static int process_mbox_notify(struct octep_device *oct,
{
struct net_device *netdev = oct->netdev;
struct octep_ctrl_net_f2h_req *req;
+ int cmd;
req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
- switch (req->hdr.s.cmd) {
+ cmd = req->hdr.s.cmd;
+
+ /* check if we support this command */
+ if (octep_ctrl_net_f2h_cmd_versions[cmd] > OCTEP_CP_VERSION_CURRENT ||
+ octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
if (netif_running(netdev)) {
if (req->link.state) {
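
Annotation (not part of the patch): octep_send_mbox_req() now refuses a host-to-firmware command unless the version that introduced it falls inside the firmware's advertised [min_fw_version, max_fw_version] window. A standalone sketch of that rule, with hypothetical names and example version values:

#include <stdint.h>
#include <stdio.h>

#define EOPNOTSUPP 95    /* same value the kernel returns for unsupported ops */

struct fw_window { uint32_t min_ver, max_ver; };

static int cmd_supported(uint32_t cmd_version, const struct fw_window *fw)
{
    if (cmd_version > fw->max_ver || cmd_version < fw->min_ver)
        return -EOPNOTSUPP;
    return 0;
}

int main(void)
{
    struct fw_window fw = { .min_ver = 0x010000, .max_ver = 0x010200 };

    /* command introduced in 1.0.0: inside the window, accepted */
    printf("%d\n", cmd_supported(0x010000, &fw));
    /* command introduced in 1.3.0: firmware too old, rejected */
    printf("%d\n", cmd_supported(0x010300, &fw));
    return 0;
}
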
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index 37880dd79116..1c2ef4ee31d9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -7,6 +7,8 @@
#ifndef __OCTEP_CTRL_NET_H__
#define __OCTEP_CTRL_NET_H__
+#include "octep_cp_version.h"
+
#define OCTEP_CTRL_NET_INVALID_VFID (-1)
/* Supported commands */
@@ -39,12 +41,14 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_MAX
};
/* Supported fw to host commands */
enum octep_ctrl_net_f2h_cmd {
OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_F2H_CMD_MAX
};
union octep_ctrl_net_req_hdr {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 592037f4e55b..e06f77ad6106 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -55,6 +55,7 @@ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
[LMAC_MODE_50G_R] = "50G_R",
[LMAC_MODE_100G_R] = "100G_R",
[LMAC_MODE_USXGMII] = "USXGMII",
+ [LMAC_MODE_USGMII] = "USGMII",
};
/* CGX PHY management internal APIs */
@@ -223,24 +224,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
return 0;
}
-static u64 mac2u64 (u8 *mac_addr)
-{
- u64 mac = 0;
- int index;
-
- for (index = ETH_ALEN - 1; index >= 0; index--)
- mac |= ((u64)*mac_addr++) << (8 * index);
- return mac;
-}
-
-static void cfg2mac(u64 cfg, u8 *mac_addr)
-{
- int i, index = 0;
-
- for (i = ETH_ALEN - 1; i >= 0; i--, index++)
- mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
-}
-
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
@@ -249,13 +232,16 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
int index, id;
u64 cfg;
+ if (!lmac)
+ return -ENODEV;
+
/* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
- cfg = mac2u64 (mac_addr);
+ cfg = ether_addr_to_u64(mac_addr);
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
@@ -322,7 +308,7 @@ int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
index = id * lmac->mac_to_index_bmap.max + idx;
- cfg = mac2u64 (mac_addr);
+ cfg = ether_addr_to_u64(mac_addr);
cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
cfg |= ((u64)lmac_id << 49);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
@@ -405,7 +391,7 @@ int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
cfg &= ~CGX_RX_DMAC_ADR_MASK;
- cfg |= mac2u64 (mac_addr);
+ cfg |= ether_addr_to_u64(mac_addr);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
return 0;
@@ -441,7 +427,7 @@ int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
/* Read MAC address to check whether it is ucast or mcast */
cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
- cfg2mac(cfg, mac);
+ u64_to_ether_addr(cfg, mac);
if (is_multicast_ether_addr(mac))
lmac->mcast_filters_count--;
@@ -567,15 +553,16 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx);
- u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
+ u16 max_dmac;
int index, i;
u64 cfg = 0;
int id;
- if (!cgx)
+ if (!cgx || !lmac)
return;
+ max_dmac = lmac->mac_to_index_bmap.max;
id = get_sequence_id_of_lmac(cgx, lmac_id);
mac_ops = cgx->mac_ops;
@@ -748,7 +735,7 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
int corr_reg, uncorr_reg;
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
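
Annotation (not part of the patch): the open-coded mac2u64()/cfg2mac() helpers are replaced by the kernel's ether_addr_to_u64()/u64_to_ether_addr(), which use the same byte ordering: mac_addr[0] lands in the most significant of the 48 used bits. A standalone sketch of that ordering (local helpers below stand in for the kernel ones):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint64_t mac_to_u64(const uint8_t *mac)
{
    uint64_t v = 0;
    int i;

    for (i = 0; i < ETH_ALEN; i++)
        v = (v << 8) | mac[i];    /* mac[0] ends up as the MSB */
    return v;
}

static void u64_to_mac(uint64_t v, uint8_t *mac)
{
    int i;

    for (i = ETH_ALEN - 1; i >= 0; i--, v >>= 8)
        mac[i] = v & 0xff;
}

int main(void)
{
    uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
    uint8_t out[ETH_ALEN];
    uint64_t v = mac_to_u64(mac);

    u64_to_mac(v, out);
    printf("packed=%#llx roundtrip=%02x:%02x:%02x:%02x:%02x:%02x\n",
           (unsigned long long)v,
           out[0], out[1], out[2], out[3], out[4], out[5]);
    return 0;
}
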
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 574114179688..6f7d1dee5830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -110,6 +110,7 @@ enum LMAC_TYPE {
LMAC_MODE_50G_R = 8,
LMAC_MODE_100G_R = 9,
LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_USGMII = 11,
LMAC_MODE_MAX,
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eba307eee2b2..6b5b06c2b4e9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -136,6 +136,7 @@ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
+M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -235,7 +236,7 @@ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
npc_install_flow_req, npc_install_flow_rsp) \
M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
- npc_delete_flow_req, msg_rsp) \
+ npc_delete_flow_req, npc_delete_flow_rsp) \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
@@ -1437,6 +1438,12 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+struct ptp_get_cap_rsp {
+ struct mbox_msghdr hdr;
+#define PTP_CAP_HW_ATOMIC_UPDATE BIT_ULL(0)
+ u64 cap;
+};
+
struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
@@ -1451,6 +1458,10 @@ struct flow_msg {
__be32 ip4dst;
__be32 ip6dst[4];
};
+ union {
+ __be32 spi;
+ };
+
u8 tos;
u8 ip_ver;
u8 ip_proto;
@@ -1461,6 +1472,7 @@ struct flow_msg {
u8 ip_flag;
u8 next_header;
};
+ __be16 vlan_itci;
};
struct npc_install_flow_req {
@@ -1491,6 +1503,8 @@ struct npc_install_flow_req {
u8 vtag0_op;
u16 vtag1_def;
u8 vtag1_op;
+ /* old counter value */
+ u16 cntr_val;
};
struct npc_install_flow_rsp {
@@ -1506,6 +1520,11 @@ struct npc_delete_flow_req {
u8 all; /* PF + VFs */
};
+struct npc_delete_flow_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr_val;
+};
+
struct npc_mcam_read_entry_req {
struct mbox_msghdr hdr;
u16 entry; /* MCAM entry to read */
@@ -1556,6 +1575,8 @@ enum ptp_op {
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
PTP_OP_EXTTS_ON = 4,
+ PTP_OP_ADJTIME = 5,
+ PTP_OP_SET_CLOCK = 6,
};
struct ptp_req {
@@ -1564,11 +1585,14 @@ struct ptp_req {
s64 scaled_ppm;
u64 thresh;
int extts_on;
+ s64 delta;
+ u64 clk;
};
struct ptp_rsp {
struct mbox_msghdr hdr;
u64 clk;
+ u64 tsc;
};
struct npc_get_field_status_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 9beeead56d7b..de9fbd98dfb7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -184,6 +184,7 @@ enum key_fields {
NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
NPC_OUTER_VID,
+ NPC_INNER_VID,
NPC_TOS,
NPC_IPFRAG_IPV4,
NPC_SIP_IPV4,
@@ -204,6 +205,7 @@ enum key_fields {
NPC_DPORT_UDP,
NPC_SPORT_SCTP,
NPC_DPORT_SCTP,
+ NPC_IPSEC_SPI,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
@@ -229,6 +231,8 @@ enum key_fields {
NPC_VLAN_TAG1,
/* outer vlan tci for double tagged frame */
NPC_VLAN_TAG2,
+ /* inner vlan tci for double tagged frame */
+ NPC_VLAN_TAG3,
/* other header fields programmed to extract but not of our interest */
NPC_UNKNOWN,
NPC_KEY_FIELDS_MAX,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 0ee420a489fc..ffbd22797163 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -12,8 +12,8 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
-#include "ptp.h"
#include "mbox.h"
+#include "ptp.h"
#include "rvu.h"
#define DRV_NAME "Marvell PTP Driver"
@@ -40,6 +40,7 @@
#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_ATOMIC_OP_MASK GENMASK_ULL(28, 26)
#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
@@ -53,36 +54,62 @@
#define PTP_TIMESTAMP 0xF20ULL
#define PTP_CLOCK_SEC 0xFD0ULL
#define PTP_SEC_ROLLOVER 0xFD8ULL
+/* Atomic update related CSRs */
+#define PTP_FRNS_TIMESTAMP 0xFE0ULL
+#define PTP_NXT_ROLLOVER_SET 0xFE8ULL
+#define PTP_CURR_ROLLOVER_SET 0xFF0ULL
+#define PTP_NANO_TIMESTAMP 0xFF8ULL
+#define PTP_SEC_TIMESTAMP 0x1000ULL
#define CYCLE_MULT 1000
+#define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0)
+#define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1)
+
+/* PTP atomic update operation type */
+enum atomic_opcode {
+ ATOMIC_SET = 1,
+ ATOMIC_INC = 3,
+ ATOMIC_DEC = 4
+};
+
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
-static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+static bool is_ptp_dev_cnf10ka(struct ptp *ptp)
{
- return (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP) ? true : false;
+ return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP;
}
-static bool is_ptp_dev_cn10k(struct ptp *ptp)
+static bool is_ptp_dev_cn10ka(struct ptp *ptp)
{
- return (ptp->pdev->device == PCI_DEVID_CN10K_PTP) ? true : false;
+ return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP;
}
static bool cn10k_ptp_errata(struct ptp *ptp)
{
- if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
- ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
+ (is_rev_A0(ptp) || is_rev_A1(ptp)))
return true;
+
return false;
}
-static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+static bool is_tstmp_atomic_update_supported(struct rvu *rvu)
{
- if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
- ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
- return true;
- return false;
+ struct ptp *ptp = rvu->ptp;
+
+ if (is_rvu_otx2(rvu))
+ return false;
+
+ /* On older silicon variants of CN10K, atomic update feature
+ * is not available.
+ */
+ if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
+ (is_rev_A0(ptp) || is_rev_A1(ptp)))
+ return false;
+
+ return true;
}
static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
@@ -222,6 +249,65 @@ void ptp_put(struct ptp *ptp)
pci_dev_put(ptp->pdev);
}
+static void ptp_atomic_update(struct ptp *ptp, u64 timestamp)
+{
+ u64 regval, curr_rollover_set, nxt_rollover_set;
+
+ /* First setup NSECs and SECs */
+ writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+ writeq(timestamp / NSEC_PER_SEC,
+ ptp->reg_base + PTP_SEC_TIMESTAMP);
+
+ nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC);
+ curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC;
+ writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
+ writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
+
+ /* Now, initiate atomic update */
+ regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+ regval |= (ATOMIC_SET << 26);
+ writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta)
+{
+ bool neg_adj = false, atomic_inc_dec = false;
+ u64 regval, ptp_clock_hi;
+
+ if (delta < 0) {
+ delta = -delta;
+ neg_adj = true;
+ }
+
+ /* use atomic inc/dec when delta < 1 second */
+ if (delta < NSEC_PER_SEC)
+ atomic_inc_dec = true;
+
+ if (!atomic_inc_dec) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (neg_adj) {
+ if (ptp_clock_hi > delta)
+ ptp_clock_hi -= delta;
+ else
+ ptp_clock_hi = delta - ptp_clock_hi;
+ } else {
+ ptp_clock_hi += delta;
+ }
+ ptp_atomic_update(ptp, ptp_clock_hi);
+ } else {
+ writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+
+ /* initiate atomic inc/dec */
+ regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+ regval |= neg_adj ? (ATOMIC_DEC << 26) : (ATOMIC_INC << 26);
+ writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
+ }
+}
+
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
@@ -277,8 +363,9 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk)
return 0;
}
-void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
{
+ struct ptp *ptp = rvu->ptp;
struct pci_dev *pdev;
u64 clock_comp;
u64 clock_cfg;
@@ -297,8 +384,14 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
ptp->clock_rate = sclk * 1000000;
/* Program the seconds rollover value to 1 second */
- if (is_ptp_dev_cnf10kb(ptp))
+ if (is_tstmp_atomic_update_supported(rvu)) {
+ writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP);
+ writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
+ writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+ }
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -320,6 +413,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
+ clock_cfg |= (ATOMIC_SET << 26);
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
@@ -350,7 +447,7 @@ static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
u64 timestamp;
- if (is_ptp_dev_cn10k(ptp)) {
+ if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) {
timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
*clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
} else {
@@ -414,14 +511,12 @@ static int ptp_probe(struct pci_dev *pdev,
first_ptp_block = ptp;
spin_lock_init(&ptp->ptp_lock);
- if (is_ptp_tsfmt_sec_nsec(ptp))
- ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- else
- ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
-
if (cn10k_ptp_errata(ptp)) {
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ptp->hrtimer.function = ptp_reset_thresh;
+ } else {
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
return 0;
@@ -521,6 +616,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_EXTTS_ON:
err = ptp_extts_on(rvu->ptp, req->extts_on);
break;
+ case PTP_OP_ADJTIME:
+ ptp_atomic_adjtime(rvu->ptp, req->delta);
+ break;
+ case PTP_OP_SET_CLOCK:
+ ptp_atomic_update(rvu->ptp, (u64)req->clk);
+ break;
default:
err = -EINVAL;
break;
@@ -528,3 +629,17 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
return err;
}
+
+int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req,
+ struct ptp_get_cap_rsp *rsp)
+{
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ if (is_tstmp_atomic_update_supported(rvu))
+ rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE;
+ else
+ rsp->cap &= ~BIT_ULL_MASK(0);
+
+ return 0;
+}
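
Annotation (not part of the patch): ptp_atomic_update() derives the next/current rollover set points from the new nanosecond timestamp before triggering the ATOMIC_SET operation. A standalone sketch of that arithmetic, using the kernel roundup() semantics for positive values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* kernel roundup() for positive integers: round x up to a multiple of y */
static uint64_t round_up_ns(uint64_t x, uint64_t y)
{
    return ((x + y - 1) / y) * y;
}

int main(void)
{
    uint64_t timestamp = 5ULL * NSEC_PER_SEC + 123456789;  /* 5.123456789 s */
    uint64_t nxt = round_up_ns(timestamp, NSEC_PER_SEC);   /* next whole second */
    uint64_t curr = nxt - NSEC_PER_SEC;                    /* current second boundary */

    printf("sec=%llu nxt_rollover=%llu curr_rollover=%llu\n",
           (unsigned long long)(timestamp / NSEC_PER_SEC),
           (unsigned long long)nxt, (unsigned long long)curr);
    return 0;
}
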
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index b9d92abc3844..1229344c7279 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -23,9 +23,10 @@ struct ptp {
u32 clock_period;
};
+struct rvu;
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
-void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
+void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 73df2d564545..22c395c7d040 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -3322,7 +3322,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&rvu->rswitch.switch_lock);
if (rvu->fwdata)
- ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
rvu->fwdata->ptp_ext_tstamp);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index e8e65fd7888d..c4d999ef5ab4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -17,6 +17,7 @@
#include "mbox.h"
#include "npc.h"
#include "rvu_reg.h"
+#include "ptp.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
@@ -26,6 +27,7 @@
#define PCI_SUBSYS_DEVID_98XX 0xB100
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
@@ -634,6 +636,16 @@ static inline bool is_rvu_otx2(struct rvu *rvu)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_cnf10ka_a0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A &&
+ (pdev->revision & 0x0F) == 0x0)
+ return true;
+ return false;
+}
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 095b2cc4a699..f2b1edf1bb43 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -236,6 +236,11 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+ if (!pfmap) {
+ dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
+ event->cgx_id, event->lmac_id);
+ return;
+ }
do {
pfid = find_first_bit(&pfmap,
@@ -345,7 +350,7 @@ int rvu_cgx_init(struct rvu *rvu)
rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
- return -ENODEV;
+ return 0;
}
rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
@@ -686,7 +691,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
- int rc = 0, i;
+ int rc = 0;
u64 cfg;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -697,8 +702,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
rsp->hdr.rc = rc;
cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
/* copy 48 bit mac address to req->mac_addr */
- for (i = 0; i < ETH_ALEN; i++)
- rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ u64_to_ether_addr(cfg, rsp->mac_addr);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 3b26893efdf8..d30e84803481 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2787,6 +2787,11 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_tci));
break;
+ case NPC_INNER_VID:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_itci));
+ break;
case NPC_TOS:
seq_printf(s, "%d ", rule->packet.tos);
seq_printf(s, "mask 0x%x\n", rule->mask.tos);
@@ -2827,6 +2832,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
+ case NPC_IPSEC_SPI:
+ seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
+ seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
+ break;
default:
seq_puts(s, "\n");
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 952319453701..237f82082ebe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -20,6 +20,7 @@ static const char * const npc_flow_names[] = {
[NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
[NPC_OUTER_VID] = "outer vlan id",
+ [NPC_INNER_VID] = "inner vlan id",
[NPC_TOS] = "tos",
[NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
[NPC_SIP_IPV4] = "ipv4 source ip",
@@ -41,6 +42,7 @@ static const char * const npc_flow_names[] = {
[NPC_SPORT_SCTP] = "sctp source port",
[NPC_DPORT_SCTP] = "sctp destination port",
[NPC_LXMB] = "Mcast/Bcast header ",
+ [NPC_IPSEC_SPI] = "SPI ",
[NPC_UNKNOWN] = "unknown",
};
@@ -327,6 +329,8 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
*/
struct npc_key_field *vlan_tag1;
struct npc_key_field *vlan_tag2;
+ /* Inner VLAN TCI for double tagged frames */
+ struct npc_key_field *vlan_tag3;
u64 *features;
u8 start_lid;
int i;
@@ -349,6 +353,7 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+ vlan_tag3 = &key_fields[NPC_VLAN_TAG3];
/* if key profile programmed does not extract Ethertype at all */
if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
@@ -430,6 +435,12 @@ vlan_tci:
goto done;
}
*features |= BIT_ULL(NPC_OUTER_VID);
+
+ /* If key profile extracts inner vlan tci */
+ if (vlan_tag3->nr_kws) {
+ key_fields[NPC_INNER_VID] = *vlan_tag3;
+ *features |= BIT_ULL(NPC_INNER_VID);
+ }
done:
return;
}
@@ -512,7 +523,12 @@ do { \
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+
+ NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
+ NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
+
/* SMAC follows the DMAC(which is 6 bytes) */
NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
@@ -564,6 +580,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
+ /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+ (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
+ *features |= BIT_ULL(NPC_IPSEC_SPI);
+
/* for vlan ethertypes corresponding layer type should be in the key */
if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
@@ -930,8 +951,13 @@ do { \
NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
+ ntohl(mask->spi), 0);
+
NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
ntohs(mask->vlan_tci), 0);
+ NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
+ ntohs(mask->vlan_itci), 0);
NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
mask->next_header, 0);
@@ -1192,7 +1218,7 @@ find_rule:
write_req.enable_entry = (u8)enable;
/* if counter is available then clear and use it */
if (req->set_cntr && rule->has_cntr) {
- rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
write_req.set_cntr = 1;
write_req.cntr = rule->cntr;
}
@@ -1407,12 +1433,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
struct npc_delete_flow_req *req,
- struct msg_rsp *rsp)
+ struct npc_delete_flow_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_npc_mcam_rule *iter, *tmp;
u16 pcifunc = req->hdr.pcifunc;
struct list_head del_list;
+ int blkaddr;
INIT_LIST_HEAD(&del_list);
@@ -1428,6 +1455,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
list_move_tail(&iter->list, &del_list);
/* single rule */
} else if (req->entry == iter->entry) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr)
+ rsp->cntr_val = rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(iter->cntr));
list_move_tail(&iter->list, &del_list);
break;
}
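
Annotation (not part of the patch): the two NPC_SCAN_HDR() lines for NPC_IPSEC_SPI use different offsets because the SPI sits at different places in the two IPsec headers: in ESP (RFC 4303) it is the first four bytes, in AH (RFC 4302) it follows next-header/length/reserved at byte offset 4. A standalone parsing sketch with made-up packet bytes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t spi_from_esp(const uint8_t *hdr)
{
    uint32_t spi;

    memcpy(&spi, hdr, 4);       /* ESP: SPI at offset 0 */
    return ntohl(spi);
}

static uint32_t spi_from_ah(const uint8_t *hdr)
{
    uint32_t spi;

    memcpy(&spi, hdr + 4, 4);   /* AH: SPI at offset 4 */
    return ntohl(spi);
}

int main(void)
{
    uint8_t esp[8]  = { 0x00, 0x00, 0x10, 0x01, 0, 0, 0, 1 };
    uint8_t ah[12]  = { 0x32, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01 };

    printf("esp spi=%#x ah spi=%#x\n", spi_from_esp(esp), spi_from_ah(ah));
    return 0;
}
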
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index 7e20282c12d0..d2661e7fabdb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -391,22 +391,6 @@ int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
}
/**
- * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
- * @mac_addr: MAC address.
- * Return: mdata for exact match table.
- */
-static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
-{
- u64 mac = 0;
- int index;
-
- for (index = ETH_ALEN - 1; index >= 0; index--)
- mac |= ((u64)*mac_addr++) << (8 * index);
-
- return mac;
-}
-
-/**
* rvu_exact_prepare_mdata - Make mdata for mcam entry
* @mac: MAC address
* @chan: Channel number.
@@ -416,7 +400,7 @@ static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
*/
static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
{
- u64 ldata = rvu_npc_exact_mac2u64(mac);
+ u64 ldata = ether_addr_to_u64(mac);
/* Please note that mask is 48bit which excludes chan and ctype.
* Increase mask bits if we need to include them as well.
@@ -604,7 +588,7 @@ static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
u8 ctype, u16 chan, u8 *mac_addr)
{
- u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
+ u64 ldata = ether_addr_to_u64(mac_addr);
/* Enable or disable */
u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 592b317f4637..854045ed3b06 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -158,6 +158,7 @@ void rvu_switch_enable(struct rvu *rvu)
struct npc_mcam_alloc_entry_req alloc_req = { 0 };
struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct msg_rsp rsp;
@@ -197,7 +198,7 @@ void rvu_switch_enable(struct rvu *rvu)
uninstall_rules:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
- rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
kfree(rswitch->entry2pcifunc);
free_entries:
free_req.all = 1;
@@ -209,6 +210,7 @@ exit:
void rvu_switch_disable(struct rvu *rvu)
{
struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct rvu_hwinfo *hw = rvu->hw;
@@ -250,7 +252,7 @@ void rvu_switch_disable(struct rvu *rvu)
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
- rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
rswitch->used_entries = 0;
kfree(rswitch->entry2pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b9712040a0bc..8511906cb4e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -7,6 +7,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
@@ -774,6 +775,7 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
rsp->schq_list[lvl][schq];
pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
+ pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
return 0;
}
@@ -1904,31 +1906,16 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
}
- if ((changed & NETIF_F_HW_TC) && tc) {
- if (!pfvf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable TC, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
if ((changed & NETIF_F_HW_TC) && !tc &&
- pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ otx2_tc_flower_rule_cnt(pfvf)) {
netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
return -EBUSY;
}
if ((changed & NETIF_F_NTUPLE) && ntuple &&
- (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
- netdev_err(netdev,
- "Can't enable NTUPLE when TC is active, disable TC and retry\n");
- return -EINVAL;
- }
-
- if ((changed & NETIF_F_HW_TC) && tc &&
- (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) {
netdev_err(netdev,
- "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ba8091131ec0..4c6032ee7800 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -224,6 +224,7 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
+ u8 txschq_aggr_lvl_rr_prio;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -325,6 +326,7 @@ struct otx2_ptp {
struct ptp_pin_desc extts_config;
u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
struct delayed_work synctstamp_work;
u64 tstamp;
u32 base_ns;
@@ -360,13 +362,8 @@ struct otx2_flow_config {
struct list_head flow_list;
u32 dmacflt_max_flows;
u16 max_flows;
-};
-
-struct otx2_tc_info {
- /* hash table to store TC offloaded flows */
- struct rhashtable flow_table;
- struct rhashtable_params flow_ht_params;
- unsigned long *tc_entries_bitmap;
+ struct list_head flow_list_tc;
+ bool ntuple;
};
struct dev_hw_ops {
@@ -491,7 +488,6 @@ struct otx2_nic {
/* NPC MCAM */
struct otx2_flow_config *flow_cfg;
struct otx2_mac_table *mac_table;
- struct otx2_tc_info tc_info;
u64 reset_count;
struct work_struct reset_task;
@@ -945,6 +941,15 @@ static inline u64 otx2_convert_rate(u64 rate)
return converted_rate;
}
+static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
+{
+ /* return here if MCAM entries not allocated */
+ if (!pfvf->flow_cfg)
+ return 0;
+
+ return pfvf->flow_cfg->nr_flows;
+}
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -1063,7 +1068,6 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 63ef7c41d18d..4e1130496573 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,7 +41,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
return 0;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
- otx2_tc_alloc_ent_bitmap(pfvf);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index c47d91da32dc..9efcec549834 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -764,6 +764,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
+ pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 2d7713a1a153..4762dbea64a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -276,6 +276,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
+ INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
flow_cfg->max_flows = 0;
return 0;
@@ -298,6 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return -ENOMEM;
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 9551b422622a..70b9065f7d10 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -16,6 +16,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bitfield.h>
+#include <net/page_pool/types.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -2027,7 +2028,7 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
#endif
int txq;
- qos_enabled = (netdev->real_num_tx_queues > pf->hw.tx_queues) ? true : false;
+ qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
if (unlikely(qos_enabled)) {
/* This smp_load_acquire() pairs with smp_store_release() in
* otx2_qos_root_add() called from htb offload root creation
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 896b2f9bac34..3a72b0793d4a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -10,6 +10,65 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+static bool is_tstmp_atomic_update_supported(struct otx2_ptp *ptp)
+{
+ struct ptp_get_cap_rsp *rsp;
+ struct msg_req *req;
+ int err;
+
+ if (!ptp->nic)
+ return false;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_ptp_get_cap(&ptp->nic->mbox);
+ if (!req) {
+ mutex_unlock(&ptp->nic->mbox.lock);
+ return false;
+ }
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err) {
+ mutex_unlock(&ptp->nic->mbox.lock);
+ return false;
+ }
+ rsp = (struct ptp_get_cap_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (IS_ERR(rsp))
+ return false;
+
+ if (rsp->cap & PTP_CAP_HW_ATOMIC_UPDATE)
+ return true;
+
+ return false;
+}
+
+static int otx2_ptp_hw_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ struct ptp_req *req;
+ int rc;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->op = PTP_OP_ADJTIME;
+ req->delta = delta;
+ rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return rc;
+}
+
static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
{
struct ptp_req *req;
@@ -37,6 +96,49 @@ static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
return rsp->clk;
}
+static int otx2_ptp_hw_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ u64 tstamp;
+
+ tstamp = otx2_ptp_get_clock(ptp);
+
+ *ts = ns_to_timespec64(tstamp);
+ return 0;
+}
+
+static int otx2_ptp_hw_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ struct ptp_req *req;
+ u64 nsec;
+ int rc;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_SET_CLOCK;
+ req->clk = nsec;
+ rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return rc;
+}
+
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -124,16 +226,7 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
-static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
-{
- struct otx2_nic *pfvf = ptp->nic;
-
- mutex_lock(&pfvf->mbox.lock);
- *tstamp = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-}
-
-static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+static int otx2_ptp_tc_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
@@ -146,32 +239,33 @@ static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
return 0;
}
-static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
- struct timespec64 *ts)
+static int otx2_ptp_tc_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
u64 tstamp;
- otx2_get_ptpclock(ptp, &tstamp);
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstamp = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&ptp->nic->mbox.lock);
*ts = ns_to_timespec64(tstamp);
return 0;
}
-static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
- const struct timespec64 *ts)
+static int otx2_ptp_tc_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
- struct otx2_nic *pfvf = ptp->nic;
u64 nsec;
nsec = timespec64_to_ns(ts);
- mutex_lock(&pfvf->mbox.lock);
+ mutex_lock(&ptp->nic->mbox.lock);
timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
- mutex_unlock(&pfvf->mbox.lock);
+ mutex_unlock(&ptp->nic->mbox.lock);
return 0;
}
@@ -190,6 +284,12 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
return 0;
}
+static u64 otx2_ptp_hw_tstamp2time(const struct timecounter *time_counter, u64 tstamp)
+{
+ /* On HW which supports atomic updates, timecounter is not initialized */
+ return tstamp;
+}
+
static void otx2_ptp_extts_check(struct work_struct *work)
{
struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
@@ -204,7 +304,7 @@ static void otx2_ptp_extts_check(struct work_struct *work)
if (tstmp != ptp->last_extts) {
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
- event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
+ event.timestamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
@@ -229,7 +329,7 @@ static void otx2_sync_tstamp(struct work_struct *work)
tstamp = otx2_ptp_get_clock(ptp);
mutex_unlock(&pfvf->mbox.lock);
- ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+ ptp->tstamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstamp);
ptp->base_ns = tstamp % NSEC_PER_SEC;
schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
@@ -302,15 +402,6 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->nic = pfvf;
- cc = &ptp_ptr->cycle_counter;
- cc->read = ptp_cc_read;
- cc->mask = CYCLECOUNTER_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
- ktime_to_ns(ktime_get_real()));
-
snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
ptp_ptr->extts_config.index = 0;
ptp_ptr->extts_config.func = PTP_PF_NONE;
@@ -324,13 +415,33 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
.pps = 0,
.pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
- .adjtime = otx2_ptp_adjtime,
- .gettime64 = otx2_ptp_gettime,
- .settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
.verify = otx2_ptp_verify_pin,
};
+ /* Check whether hardware supports atomic updates to timestamp */
+ if (is_tstmp_atomic_update_supported(ptp_ptr)) {
+ ptp_ptr->ptp_info.adjtime = otx2_ptp_hw_adjtime;
+ ptp_ptr->ptp_info.gettime64 = otx2_ptp_hw_gettime;
+ ptp_ptr->ptp_info.settime64 = otx2_ptp_hw_settime;
+
+ ptp_ptr->ptp_tstamp2nsec = otx2_ptp_hw_tstamp2time;
+ } else {
+ ptp_ptr->ptp_info.adjtime = otx2_ptp_tc_adjtime;
+ ptp_ptr->ptp_info.gettime64 = otx2_ptp_tc_gettime;
+ ptp_ptr->ptp_info.settime64 = otx2_ptp_tc_settime;
+
+ cc = &ptp_ptr->cycle_counter;
+ cc->read = ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+ ptp_ptr->ptp_tstamp2nsec = timecounter_cyc2time;
+
+ timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+ }
+
INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
@@ -387,7 +498,7 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
if (!pfvf->ptp)
return -ENODEV;
- *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+ *tsns = pfvf->ptp->ptp_tstamp2nsec(&pfvf->ptp->time_counter, tstamp);
return 0;
}
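
Annotation (not part of the patch): otx2_ptp_init() now picks one of two timestamp-conversion paths via the ptp_tstamp2nsec pointer -- an identity conversion when the hardware supports atomic updates (the counter already runs in nanoseconds), or timecounter_cyc2time() on older silicon. A standalone sketch of that dispatch; the timecounter below is a trivial stand-in, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct fake_timecounter { uint64_t offset_ns; };

static uint64_t hw_tstamp2nsec(const struct fake_timecounter *tc, uint64_t ts)
{
    (void)tc;                   /* timecounter unused on atomic-update hardware */
    return ts;
}

static uint64_t sw_tstamp2nsec(const struct fake_timecounter *tc, uint64_t ts)
{
    return tc->offset_ns + ts;  /* stand-in for timecounter_cyc2time() */
}

int main(void)
{
    uint64_t (*tstamp2nsec)(const struct fake_timecounter *, uint64_t);
    struct fake_timecounter tc = { .offset_ns = 1000 };
    bool hw_atomic = true;      /* would come from the PTP_GET_CAP mailbox reply */

    tstamp2nsec = hw_atomic ? hw_tstamp2nsec : sw_tstamp2nsec;
    printf("%llu\n", (unsigned long long)tstamp2nsec(&tc, 42));
    return 0;
}
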
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 5e56b6c3e60a..fab9d85bfb37 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -34,9 +34,8 @@ struct otx2_tc_flow_stats {
};
struct otx2_tc_flow {
- struct rhash_head node;
+ struct list_head list;
unsigned long cookie;
- unsigned int bitpos;
struct rcu_head rcu;
struct otx2_tc_flow_stats stats;
spinlock_t lock; /* lock for stats */
@@ -44,31 +43,10 @@ struct otx2_tc_flow {
u16 entry;
u16 leaf_profile;
bool is_act_police;
+ u32 prio;
+ struct npc_install_flow_req req;
};
-int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
-{
- struct otx2_tc_info *tc = &nic->tc_info;
-
- if (!nic->flow_cfg->max_flows)
- return 0;
-
- /* Max flows changed, free the existing bitmap */
- kfree(tc->tc_entries_bitmap);
-
- tc->tc_entries_bitmap =
- kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
- sizeof(long), GFP_KERNEL);
- if (!tc->tc_entries_bitmap) {
- netdev_err(nic->netdev,
- "Unable to alloc TC flow entries bitmap\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
-
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
u32 *burst_exp, u32 *burst_mantissa)
{
@@ -461,6 +439,62 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return 0;
}
+static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
+ struct flow_msg *flow_mask, struct flow_rule *rule,
+ struct npc_install_flow_req *req, bool is_inner)
+{
+ struct flow_match_vlan match;
+ u16 vlan_tci, vlan_tci_mask;
+
+ if (is_inner)
+ flow_rule_match_cvlan(rule, &match);
+ else
+ flow_rule_match_vlan(rule, &match);
+
+ if (!eth_type_vlan(match.key->vlan_tpid)) {
+ netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
+ ntohs(match.key->vlan_tpid));
+ return -EOPNOTSUPP;
+ }
+
+ if (!match.mask->vlan_id) {
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == FLOW_ACTION_DROP) {
+ netdev_err(nic->netdev,
+ "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
+ ntohs(match.key->vlan_tpid), match.key->vlan_id);
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ if (match.mask->vlan_id ||
+ match.mask->vlan_dei ||
+ match.mask->vlan_priority) {
+ vlan_tci = match.key->vlan_id |
+ match.key->vlan_dei << 12 |
+ match.key->vlan_priority << 13;
+
+ vlan_tci_mask = match.mask->vlan_id |
+ match.mask->vlan_dei << 12 |
+ match.mask->vlan_priority << 13;
+ if (is_inner) {
+ flow_spec->vlan_itci = htons(vlan_tci);
+ flow_mask->vlan_itci = htons(vlan_tci_mask);
+ req->features |= BIT_ULL(NPC_INNER_VID);
+ } else {
+ flow_spec->vlan_tci = htons(vlan_tci);
+ flow_mask->vlan_tci = htons(vlan_tci_mask);
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+ }
+
+ return 0;
+}
+
static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
struct flow_cls_offload *f,
struct npc_install_flow_req *req)
@@ -476,15 +510,17 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
dissector = rule->match.dissector;
if ((dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_IP)))) {
- netdev_info(nic->netdev, "unsupported flow used key 0x%x",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_IPSEC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
+ netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -504,6 +540,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
match.key->ip_proto != IPPROTO_UDP &&
match.key->ip_proto != IPPROTO_SCTP &&
match.key->ip_proto != IPPROTO_ICMP &&
+ match.key->ip_proto != IPPROTO_ESP &&
+ match.key->ip_proto != IPPROTO_AH &&
match.key->ip_proto != IPPROTO_ICMPV6)) {
netdev_info(nic->netdev,
"ip_proto=0x%x not supported\n",
@@ -523,6 +561,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
else if (ip_proto == IPPROTO_ICMPV6)
req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
+ else if (ip_proto == IPPROTO_ESP)
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ else if (ip_proto == IPPROTO_AH)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -567,6 +609,26 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
+ struct flow_match_ipsec match;
+
+ flow_rule_match_ipsec(rule, &match);
+ if (!match.mask->spi) {
+ NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
+ return -EOPNOTSUPP;
+ }
+ if (ip_proto != IPPROTO_ESP &&
+ ip_proto != IPPROTO_AH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SPI index is valid only for ESP/AH proto");
+ return -EOPNOTSUPP;
+ }
+
+ flow_spec->spi = match.key->spi;
+ flow_mask->spi = match.mask->spi;
+ req->features |= BIT_ULL(NPC_IPSEC_SPI);
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
@@ -586,47 +648,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_match_vlan match;
- u16 vlan_tci, vlan_tci_mask;
-
- flow_rule_match_vlan(rule, &match);
-
- if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
- netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
- ntohs(match.key->vlan_tpid));
- return -EOPNOTSUPP;
- }
+ int ret;
- if (!match.mask->vlan_id) {
- struct flow_action_entry *act;
- int i;
-
- flow_action_for_each(i, act, &rule->action) {
- if (act->id == FLOW_ACTION_DROP) {
- netdev_err(nic->netdev,
- "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
- ntohs(match.key->vlan_tpid),
- match.key->vlan_id);
- return -EOPNOTSUPP;
- }
- }
- }
-
- if (match.mask->vlan_id ||
- match.mask->vlan_dei ||
- match.mask->vlan_priority) {
- vlan_tci = match.key->vlan_id |
- match.key->vlan_dei << 12 |
- match.key->vlan_priority << 13;
+ ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
+ if (ret)
+ return ret;
+ }
- vlan_tci_mask = match.mask->vlan_id |
- match.mask->vlan_dei << 12 |
- match.mask->vlan_priority << 13;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ int ret;
- flow_spec->vlan_tci = htons(vlan_tci);
- flow_mask->vlan_tci = htons(vlan_tci_mask);
- req->features |= BIT_ULL(NPC_OUTER_VID);
- }
+ ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
+ if (ret)
+ return ret;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
@@ -707,8 +741,117 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
-static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
+static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_tc_flow *iter, *tmp;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+}
+
+static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
+ unsigned long cookie)
{
+ struct otx2_tc_flow *tmp;
+
+ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
+ if (tmp->cookie == cookie)
+ return tmp;
+ }
+
+ return NULL;
+}
+
+static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
+ int index)
+{
+ struct otx2_tc_flow *tmp;
+ int i = 0;
+
+ list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
+ if (i == index)
+ return tmp;
+ i++;
+ }
+
+ return NULL;
+}
+
+static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node == tmp) {
+ list_del(&node->list);
+ return;
+ }
+ }
+}
+
+static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+ int index = 0;
+
+ /* If the flow list is empty then add the new node */
+ if (list_empty(&flow_cfg->flow_list_tc)) {
+ list_add(&node->list, &flow_cfg->flow_list_tc);
+ return index;
+ }
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node->prio < tmp->prio)
+ break;
+ index++;
+ }
+
+ list_add(&node->list, pos->prev);
+ return index;
+}
+
+static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
+{
+ struct npc_install_flow_req *tmp_req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+ tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!tmp_req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err) {
+ netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
+ req->entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ return 0;
+}
+
+static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
+{
+ struct npc_delete_flow_rsp *rsp;
struct npc_delete_flow_req *req;
int err;
@@ -729,22 +872,113 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
mutex_unlock(&nic->mbox.lock);
return -EFAULT;
}
+
+ if (cntr_val) {
+ rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
+ entry);
+ mutex_unlock(&nic->mbox.lock);
+ return -EFAULT;
+ }
+
+ *cntr_val = rsp->cntr_val;
+ }
+
mutex_unlock(&nic->mbox.lock);
+ return 0;
+}
+
+static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct list_head *pos, *n;
+ struct otx2_tc_flow *tmp;
+ int i = 0, index = 0;
+ u16 cntr_val = 0;
+
+ /* Find and delete the entry from the list, then re-install
+ * every entry that preceded it (from the head of the list up
+ * to the deleted entry's position) at the next higher MCAM
+ * index.
+ */
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ if (node == tmp) {
+ list_del(&tmp->list);
+ break;
+ }
+
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ tmp->entry++;
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ index++;
+ }
+
+ list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
+ if (i == index)
+ break;
+
+ tmp = list_entry(pos, struct otx2_tc_flow, list);
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ i++;
+ }
return 0;
}
+static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
+ struct otx2_tc_flow *tmp;
+ int list_idx, i;
+ u16 cntr_val = 0;
+
+ /* Find the index (list_idx) of the first entry whose priority
+ * is greater than that of the new entry, then re-install all
+ * entries from the head of the list up to list_idx at higher
+ * MCAM indexes.
+ */
+ list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
+ for (i = 0; i < list_idx; i++) {
+ tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
+ if (!tmp)
+ return -ENOMEM;
+
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ tmp->entry = flow_cfg->flow_ent[mcam_idx];
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ mcam_idx++;
+ }
+
+ return mcam_idx;
+}
+
+static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node,
+ bool add_req)
+{
+ if (add_req)
+ return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
+
+ return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
+}
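/*
 * Illustrative sketch (not part of the patch): the TC rules are kept in
 * priority order and their MCAM indices must follow that order, so deleting
 * a rule means every rule that preceded it in the list is re-installed one
 * MCAM index higher (otx2_tc_update_mcam_table_del_req() also carries
 * cntr_val along so the hit counters survive the move).  A user-space model
 * of the index shuffle on delete, with made-up index values:
 */
#include <stdio.h>

int main(void)
{
	/* MCAM indices of four rules, list head (highest priority) first */
	unsigned int entry[4] = { 100, 101, 102, 103 };
	int del = 2;	/* delete the rule currently at entry[2] */

	/* rules ahead of the deleted one are re-installed at entry + 1 */
	for (int i = 0; i < del; i++)
		entry[i] += 1;

	printf("%u %u (rule removed) %u\n", entry[0], entry[1], entry[3]);
	/* prints: 101 102 (rule removed) 103 */
	return 0;
}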
+
static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
- struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
int err;
- flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
- &tc_flow_cmd->cookie,
- tc_info->flow_ht_params);
+ flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
if (!flow_node) {
netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
tc_flow_cmd->cookie);
@@ -772,16 +1006,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
mutex_unlock(&nic->mbox.lock);
}
- otx2_del_mcam_flow_entry(nic, flow_node->entry);
-
- WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
- &flow_node->node,
- nic->tc_info.flow_ht_params));
+ otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
+ otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
kfree_rcu(flow_node, rcu);
-
- clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
flow_cfg->nr_flows--;
-
return 0;
}
@@ -790,15 +1018,14 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
{
struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
- struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
struct npc_install_flow_req *req, dummy;
- int rc, err;
+ int rc, err, mcam_idx;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
- if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
+ if (flow_cfg->nr_flows == flow_cfg->max_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Free MCAM entry not available to add the flow");
return -ENOMEM;
@@ -810,6 +1037,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
return -ENOMEM;
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
+ new_node->prio = tc_flow_cmd->common.prio;
memset(&dummy, 0, sizeof(struct npc_install_flow_req));
@@ -820,12 +1048,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
}
/* If a flow exists with the same cookie, delete it */
- old_node = rhashtable_lookup_fast(&tc_info->flow_table,
- &tc_flow_cmd->cookie,
- tc_info->flow_ht_params);
+ old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
+ mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
if (!req) {
@@ -836,11 +1063,8 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
-
- new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
- flow_cfg->max_flows);
req->channel = nic->hw.rx_chan_base;
- req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
+ req->entry = flow_cfg->flow_ent[mcam_idx];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -850,26 +1074,18 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
mutex_unlock(&nic->mbox.lock);
- kfree_rcu(new_node, rcu);
goto free_leaf;
}
- mutex_unlock(&nic->mbox.lock);
- /* add new flow to flow-table */
- rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
- nic->tc_info.flow_ht_params);
- if (rc) {
- otx2_del_mcam_flow_entry(nic, req->entry);
- kfree_rcu(new_node, rcu);
- goto free_leaf;
- }
+ mutex_unlock(&nic->mbox.lock);
+ memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));
- set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
flow_cfg->nr_flows++;
-
return 0;
free_leaf:
+ otx2_tc_del_from_flow_list(flow_cfg, new_node);
+ kfree_rcu(new_node, rcu);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);
@@ -896,16 +1112,13 @@ free_leaf:
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
- struct otx2_tc_info *tc_info = &nic->tc_info;
struct npc_mcam_get_stats_req *req;
struct npc_mcam_get_stats_rsp *rsp;
struct otx2_tc_flow_stats *stats;
struct otx2_tc_flow *flow_node;
int err;
- flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
- &tc_flow_cmd->cookie,
- tc_info->flow_ht_params);
+ flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
if (!flow_node) {
netdev_info(nic->netdev, "tc flow not found for cookie %lx",
tc_flow_cmd->cookie);
@@ -1053,12 +1266,20 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct otx2_nic *nic = cb_priv;
+ bool ntuple;
if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
return -EOPNOTSUPP;
+ ntuple = nic->netdev->features & NETIF_F_NTUPLE;
switch (type) {
case TC_SETUP_CLSFLOWER:
+ if (ntuple) {
+ netdev_warn(nic->netdev,
+ "Can't install TC flower offload rule when NTUPLE is active");
+ return -EOPNOTSUPP;
+ }
+
return otx2_setup_tc_cls_flower(nic, type_data);
case TC_SETUP_CLSMATCHALL:
return otx2_setup_tc_ingress_matchall(nic, type_data);
@@ -1143,18 +1364,8 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
EXPORT_SYMBOL(otx2_setup_tc);
-static const struct rhashtable_params tc_flow_ht_params = {
- .head_offset = offsetof(struct otx2_tc_flow, node),
- .key_offset = offsetof(struct otx2_tc_flow, cookie),
- .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
- .automatic_shrinking = true,
-};
-
int otx2_init_tc(struct otx2_nic *nic)
{
- struct otx2_tc_info *tc = &nic->tc_info;
- int err;
-
/* Exclude receive queue 0 being used for police action */
set_bit(0, &nic->rq_bmap);
@@ -1164,25 +1375,12 @@ int otx2_init_tc(struct otx2_nic *nic)
return -EINVAL;
}
- err = otx2_tc_alloc_ent_bitmap(nic);
- if (err)
- return err;
-
- tc->flow_ht_params = tc_flow_ht_params;
- err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
- if (err) {
- kfree(tc->tc_entries_bitmap);
- tc->tc_entries_bitmap = NULL;
- }
- return err;
+ return 0;
}
EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
- struct otx2_tc_info *tc = &nic->tc_info;
-
- kfree(tc->tc_entries_bitmap);
- rhashtable_destroy(&tc->flow_table);
+ otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index d3a76c5ccda8..1e77bbf5d22a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -19,6 +19,9 @@
#define OTX2_QOS_CLASS_NONE 0
#define OTX2_QOS_DEFAULT_PRIO 0xF
#define OTX2_QOS_INVALID_SQ 0xFFFF
+#define OTX2_QOS_INVALID_TXSCHQ_IDX 0xFFFF
+#define CN10K_MAX_RR_WEIGHT GENMASK_ULL(13, 0)
+#define OTX2_MAX_RR_QUANTUM GENMASK_ULL(23, 0)
static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
@@ -65,11 +68,24 @@ static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
}
}
+static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
+{
+ u32 weight;
+
+ weight = quantum / pfvf->hw.dwrr_mtu;
+ if (quantum % pfvf->hw.dwrr_mtu)
+ weight += 1;
+
+ return weight;
+}
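/*
 * Illustrative sketch (not part of the patch): the DWRR weight is simply the
 * quantum expressed in units of dwrr_mtu, rounded up.  The dwrr_mtu value of
 * 1500 bytes below is an assumption used only for the numbers:
 */
#include <stdio.h>

static unsigned int quantum_to_weight(unsigned int quantum, unsigned int dwrr_mtu)
{
	/* same arithmetic as otx2_qos_quantum_to_dwrr_weight() */
	return quantum / dwrr_mtu + (quantum % dwrr_mtu ? 1 : 0);
}

int main(void)
{
	printf("%u\n", quantum_to_weight(1500, 1500));	/* 1  */
	printf("%u\n", quantum_to_weight(1501, 1500));	/* 2  */
	printf("%u\n", quantum_to_weight(60000, 1500));	/* 40 */
	return 0;
}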
+
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
struct otx2_qos_node *node,
struct nix_txschq_config *cfg,
int *num_regs)
{
+ u32 rr_weight;
+ u32 quantum;
u64 maxrate;
otx2_qos_get_regaddr(node, cfg, *num_regs);
@@ -86,8 +102,17 @@ static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
return;
}
- /* configure priority */
- cfg->regval[*num_regs] = (node->schq - node->parent->prio_anchor) << 24;
+ /* configure priority/quantum */
+ if (node->is_static) {
+ cfg->regval[*num_regs] =
+ (node->schq - node->parent->prio_anchor) << 24;
+ } else {
+ quantum = node->quantum ?
+ node->quantum : pfvf->tx_max_pktlen;
+ rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
+ cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
+ rr_weight;
+ }
(*num_regs)++;
/* configure PIR */
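/*
 * Illustrative sketch (not part of the patch): for a DWRR child, the
 * priority/quantum register value built above packs the parent's DWRR
 * priority above bit 24 and the round-robin weight into the low bits (the
 * exact width of the weight field is not spelled out here and is left as an
 * assumption):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t dwrr_regval(uint32_t dwrr_prio, uint32_t rr_weight)
{
	return dwrr_prio << 24 | rr_weight;
}

int main(void)
{
	/* priority 7, weight 40 -> 0x07000028 */
	printf("0x%08x\n", dwrr_regval(7, 40));
	return 0;
}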
@@ -195,9 +220,8 @@ static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);
cfg->regval[0] = (u64)parent->prio_anchor << 32;
- if (parent->level == NIX_TXSCH_LVL_TL1)
- cfg->regval[0] |= (u64)TXSCH_TL1_DFLT_RR_PRIO << 1;
-
+ cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
+ parent->child_dwrr_prio : 0) << 1;
cfg->num_regs++;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -315,9 +339,14 @@ static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
list_for_each_entry(node, &parent->child_list, list) {
otx2_qos_fill_cfg_tl(node, cfg);
- cfg->schq_contig[node->level]++;
otx2_qos_fill_cfg_schq(node, cfg);
}
+
+ /* Assign the required number of transmit scheduler queues under the
+ * given class
+ */
+ cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
+ parent->max_static_prio + 1;
}
static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
@@ -378,10 +407,12 @@ otx2_qos_alloc_root(struct otx2_nic *pfvf)
return ERR_PTR(-ENOMEM);
node->parent = NULL;
- if (!is_otx2_vf(pfvf->pcifunc))
+ if (!is_otx2_vf(pfvf->pcifunc)) {
node->level = NIX_TXSCH_LVL_TL1;
- else
+ } else {
node->level = NIX_TXSCH_LVL_TL2;
+ node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ }
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
node->classid = OTX2_QOS_ROOT_CLASSID;
@@ -401,9 +432,13 @@ static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
struct otx2_qos_node *tmp_node;
struct list_head *tmp;
+ if (node->prio > parent->max_static_prio)
+ parent->max_static_prio = node->prio;
+
for (tmp = head->next; tmp != head; tmp = tmp->next) {
tmp_node = list_entry(tmp, struct otx2_qos_node, list);
- if (tmp_node->prio == node->prio)
+ if (tmp_node->prio == node->prio &&
+ tmp_node->is_static)
return -EEXIST;
if (tmp_node->prio > node->prio) {
list_add_tail(&node->list, tmp);
@@ -434,6 +469,10 @@ static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
txschq_node->rate = 0;
txschq_node->ceil = 0;
txschq_node->prio = 0;
+ txschq_node->quantum = 0;
+ txschq_node->is_static = true;
+ txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
mutex_lock(&pfvf->qos.qos_lock);
list_add_tail(&txschq_node->list, &node->child_schq_list);
@@ -459,7 +498,7 @@ static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
struct otx2_qos_node *parent,
u16 classid, u32 prio, u64 rate, u64 ceil,
- u16 qid)
+ u32 quantum, u16 qid, bool static_cfg)
{
struct otx2_qos_node *node;
int err;
@@ -476,6 +515,10 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
node->rate = otx2_convert_rate(rate);
node->ceil = otx2_convert_rate(ceil);
node->prio = prio;
+ node->quantum = quantum;
+ node->is_static = static_cfg;
+ node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
__set_bit(qid, pfvf->qos.qos_sq_bmap);
@@ -622,12 +665,28 @@ static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
}
pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
+ pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
out:
mutex_unlock(&mbox->lock);
return rc;
}
+static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
+ struct otx2_qos_cfg *cfg)
+{
+ int lvl, idx, schq;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
+ if (!cfg->schq_index_used[lvl][idx]) {
+ schq = cfg->schq_contig_list[lvl][idx];
+ otx2_txschq_free_one(pfvf, lvl, schq);
+ }
+ }
+ }
+}
+
static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
struct otx2_qos_node *node,
struct otx2_qos_cfg *cfg)
@@ -652,9 +711,11 @@ static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
list_for_each_entry(tmp, &node->child_list, list) {
otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
cnt = cfg->static_node_pos[tmp->level];
- tmp->schq = cfg->schq_contig_list[tmp->level][cnt];
+ tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
+ cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
if (cnt == 0)
- node->prio_anchor = tmp->schq;
+ node->prio_anchor =
+ cfg->schq_contig_list[tmp->level][0];
cfg->static_node_pos[tmp->level]++;
otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
}
@@ -667,9 +728,87 @@ static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
mutex_lock(&pfvf->qos.qos_lock);
otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
+ otx2_qos_free_unused_txschq(pfvf, cfg);
mutex_unlock(&pfvf->qos.qos_lock);
}
+static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *tmp,
+ unsigned long *child_idx_bmap,
+ int child_cnt)
+{
+ int idx;
+
+ if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
+ return;
+
+ /* assign static nodes a 1:1 prio mapping first, then the remaining nodes */
+ for (idx = 0; idx < child_cnt; idx++) {
+ if (tmp->is_static && tmp->prio == idx &&
+ !test_bit(idx, child_idx_bmap)) {
+ tmp->txschq_idx = idx;
+ set_bit(idx, child_idx_bmap);
+ return;
+ } else if (!tmp->is_static && idx >= tmp->prio &&
+ !test_bit(idx, child_idx_bmap)) {
+ tmp->txschq_idx = idx;
+ set_bit(idx, child_idx_bmap);
+ return;
+ }
+ }
+}
+
+static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ unsigned long *child_idx_bmap;
+ struct otx2_qos_node *tmp;
+ int child_cnt;
+
+ list_for_each_entry(tmp, &node->child_list, list)
+ tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;
+
+ /* allocate child index bitmap */
+ child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
+ child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!child_idx_bmap)
+ return -ENOMEM;
+
+ list_for_each_entry(tmp, &node->child_list, list)
+ otx2_qos_assign_base_idx_tl(pfvf, tmp);
+
+ /* assign base index of static priority children first */
+ list_for_each_entry(tmp, &node->child_list, list) {
+ if (!tmp->is_static)
+ continue;
+ __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
+ child_cnt);
+ }
+
+ /* assign base index of dwrr priority children */
+ list_for_each_entry(tmp, &node->child_list, list)
+ __otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
+ child_cnt);
+
+ kfree(child_idx_bmap);
+
+ return 0;
+}
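/*
 * Illustrative sketch (not part of the patch): txschq index assignment gives
 * every static child the slot equal to its priority, then lets DWRR children
 * take the first free slot at or above the (single) DWRR priority.  A small
 * user-space model with made-up child priorities:
 */
#include <stdbool.h>
#include <stdio.h>

#define SLOTS 8

int main(void)
{
	bool used[SLOTS] = { false };
	unsigned int static_prio[] = { 0, 2 };	/* two static children  */
	unsigned int dwrr_prio = 1;		/* three DWRR children  */
	int idx[5], n = 0;

	/* pass 1: static children get a 1:1 prio -> slot mapping */
	for (int i = 0; i < 2; i++) {
		idx[n++] = static_prio[i];
		used[static_prio[i]] = true;
	}

	/* pass 2: DWRR children take the first free slot >= dwrr_prio */
	for (int i = 0; i < 3; i++) {
		for (int s = dwrr_prio; s < SLOTS; s++) {
			if (!used[s]) {
				used[s] = true;
				idx[n++] = s;
				break;
			}
		}
	}

	for (int i = 0; i < n; i++)
		printf("child %d -> slot %d\n", i, idx[i]);
	/* static children land in slots 0 and 2; DWRR children in 1, 3, 4 */
	return 0;
}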
+
+static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
+ struct otx2_qos_node *node)
+{
+ int ret = 0;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ ret = otx2_qos_assign_base_idx_tl(pfvf, node);
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ return ret;
+}
+
static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
struct otx2_qos_node *node,
struct otx2_qos_cfg *cfg)
@@ -761,8 +900,10 @@ static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
- schq = cfg->schq_contig_list[lvl][idx];
- otx2_txschq_free_one(pfvf, lvl, schq);
+ if (cfg->schq_index_used[lvl][idx]) {
+ schq = cfg->schq_contig_list[lvl][idx];
+ otx2_txschq_free_one(pfvf, lvl, schq);
+ }
}
}
}
@@ -838,6 +979,10 @@ static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
if (ret)
return -ENOSPC;
+ ret = otx2_qos_assign_base_idx(pfvf, node);
+ if (ret)
+ return -ENOMEM;
+
if (!(pfvf->netdev->flags & IFF_UP)) {
otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
return 0;
@@ -894,6 +1039,13 @@ static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defc
goto free_root_node;
}
+ /* Update TL1 RR PRIO */
+ if (root->level == NIX_TXSCH_LVL_TL1) {
+ root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
+ netdev_dbg(pfvf->netdev,
+ "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
+ }
+
if (!(pfvf->netdev->flags & IFF_UP) ||
root->level == NIX_TXSCH_LVL_TL1) {
root->schq = new_cfg->schq_list[root->level][0];
@@ -940,37 +1092,126 @@ static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
return 0;
}
+static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
+{
+ u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
+ int err = 0;
+
+ /* The maximum round-robin weight supported by OcteonTX2 and CN10K
+ * differs. Validate accordingly.
+ */
+ if (is_dev_otx2(pfvf->pdev))
+ err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
+ else if (rr_weight > CN10K_MAX_RR_WEIGHT)
+ err = -EINVAL;
+
+ return err;
+}
+
+static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
+ struct netlink_ext_ack *extack,
+ struct otx2_nic *pfvf,
+ u64 prio, u64 quantum)
+{
+ int err;
+
+ err = otx2_qos_validate_quantum(pfvf, quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
+ return err;
+ }
+
+ if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
+ parent->child_dwrr_prio = prio;
+ } else if (prio != parent->child_dwrr_prio) {
+ NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
struct netlink_ext_ack *extack,
struct otx2_nic *pfvf,
- u64 prio)
+ u64 prio, bool static_cfg)
{
- if (test_bit(prio, parent->prio_bmap)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Static priority child with same priority exists");
+ if (prio == parent->child_dwrr_prio && static_cfg) {
+ NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
return -EEXIST;
}
- if (prio == TXSCH_TL1_DFLT_RR_PRIO) {
+ if (static_cfg && test_bit(prio, parent->prio_bmap)) {
NL_SET_ERR_MSG_MOD(extack,
- "Priority is reserved for Round Robin");
- return -EINVAL;
+ "Static priority child with same priority exists");
+ return -EEXIST;
}
return 0;
}
+static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
+{
+ /* For PF, root node dwrr priority is static */
+ if (parent->level == NIX_TXSCH_LVL_TL1)
+ return;
+
+ if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
+ parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
+ clear_bit(prio, parent->prio_bmap);
+ }
+}
+
+static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
+ struct otx2_nic *pfvf,
+ u64 prio)
+{
+ struct otx2_qos_node *node;
+ bool ret = false;
+
+ if (parent->child_dwrr_prio == prio)
+ return true;
+
+ mutex_lock(&pfvf->qos.qos_lock);
+ list_for_each_entry(node, &parent->child_list, list) {
+ if (prio == node->prio) {
+ if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
+ parent->child_dwrr_prio != prio)
+ continue;
+
+ if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
+ netdev_err(pfvf->netdev,
+ "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
+ node->classid, node->quantum,
+ node->prio);
+ break;
+ }
+ /* mark old node as dwrr */
+ node->is_static = false;
+ parent->child_dwrr_cnt++;
+ parent->child_static_cnt--;
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&pfvf->qos.qos_lock);
+
+ return ret;
+}
+
static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
u32 parent_classid, u64 rate, u64 ceil,
- u64 prio, struct netlink_ext_ack *extack)
+ u64 prio, u32 quantum,
+ struct netlink_ext_ack *extack)
{
struct otx2_qos_cfg *old_cfg, *new_cfg;
struct otx2_qos_node *node, *parent;
int qid, ret, err;
+ bool static_cfg;
netdev_dbg(pfvf->netdev,
- "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld\n",
- classid, parent_classid, rate, ceil, prio);
+ "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
+ classid, parent_classid, rate, ceil, prio, quantum);
if (prio > OTX2_QOS_MAX_PRIO) {
NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
@@ -978,6 +1219,12 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
goto out;
}
+ if (!quantum || quantum > INT_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
/* get parent node */
parent = otx2_sw_node_find(pfvf, parent_classid);
if (!parent) {
@@ -991,10 +1238,24 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
goto out;
}
- ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio);
+ static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
+ ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
+ static_cfg);
if (ret)
goto out;
+ if (!static_cfg) {
+ ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
+ quantum);
+ if (ret)
+ goto out;
+ }
+
+ if (static_cfg)
+ parent->child_static_cnt++;
+ else
+ parent->child_dwrr_cnt++;
+
set_bit(prio, parent->prio_bmap);
/* read current txschq configuration */
@@ -1019,7 +1280,7 @@ static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
/* allocate and initialize a new child node */
node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
- ceil, qid);
+ ceil, quantum, qid, static_cfg);
if (IS_ERR(node)) {
NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
ret = PTR_ERR(node);
@@ -1067,6 +1328,11 @@ free_node:
free_old_cfg:
kfree(old_cfg);
reset_prio:
+ if (static_cfg)
+ parent->child_static_cnt--;
+ else
+ parent->child_dwrr_cnt--;
+
clear_bit(prio, parent->prio_bmap);
out:
return ret;
@@ -1074,10 +1340,11 @@ out:
static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
u16 child_classid, u64 rate, u64 ceil, u64 prio,
- struct netlink_ext_ack *extack)
+ u32 quantum, struct netlink_ext_ack *extack)
{
struct otx2_qos_cfg *old_cfg, *new_cfg;
struct otx2_qos_node *node, *child;
+ bool static_cfg;
int ret, err;
u16 qid;
@@ -1091,6 +1358,12 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
goto out;
}
+ if (!quantum || quantum > INT_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
/* find node related to classid */
node = otx2_sw_node_find(pfvf, classid);
if (!node) {
@@ -1105,6 +1378,19 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
goto out;
}
+ static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
+ if (!static_cfg) {
+ ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
+ quantum);
+ if (ret)
+ goto out;
+ }
+
+ if (static_cfg)
+ node->child_static_cnt++;
+ else
+ node->child_dwrr_cnt++;
+
set_bit(prio, node->prio_bmap);
/* store the qid to assign to leaf node */
@@ -1127,7 +1413,8 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
/* allocate and initialize a new child node */
child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
- prio, rate, ceil, qid);
+ prio, rate, ceil, quantum,
+ qid, static_cfg);
if (IS_ERR(child)) {
NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
ret = PTR_ERR(child);
@@ -1178,6 +1465,10 @@ free_node:
free_old_cfg:
kfree(old_cfg);
reset_prio:
+ if (static_cfg)
+ node->child_static_cnt--;
+ else
+ node->child_dwrr_cnt--;
clear_bit(prio, node->prio_bmap);
out:
return ret;
@@ -1187,6 +1478,7 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
struct netlink_ext_ack *extack)
{
struct otx2_qos_node *node, *parent;
+ int dwrr_del_node = false;
u64 prio;
u16 qid;
@@ -1202,12 +1494,27 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
prio = node->prio;
qid = node->qid;
+ if (!node->is_static)
+ dwrr_del_node = true;
+
otx2_qos_disable_sq(pfvf, node->qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
- clear_bit(prio, parent->prio_bmap);
+ if (dwrr_del_node) {
+ parent->child_dwrr_cnt--;
+ } else {
+ parent->child_static_cnt--;
+ clear_bit(prio, parent->prio_bmap);
+ }
+
+ /* Reset DWRR priority if all dwrr nodes are deleted */
+ if (!parent->child_dwrr_cnt)
+ otx2_reset_dwrr_prio(parent, prio);
+
+ if (!parent->child_static_cnt)
+ parent->max_static_prio = 0;
return 0;
}
@@ -1217,6 +1524,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
{
struct otx2_qos_node *node, *parent;
struct otx2_qos_cfg *new_cfg;
+ int dwrr_del_node = false;
u64 prio;
int err;
u16 qid;
@@ -1241,11 +1549,26 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
return -ENOENT;
}
+ if (!node->is_static)
+ dwrr_del_node = true;
+
/* destroy the leaf node */
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
- clear_bit(prio, parent->prio_bmap);
+ if (dwrr_del_node) {
+ parent->child_dwrr_cnt--;
+ } else {
+ parent->child_static_cnt--;
+ clear_bit(prio, parent->prio_bmap);
+ }
+
+ /* Reset DWRR priority if all dwrr nodes are deleted */
+ if (!parent->child_dwrr_cnt)
+ otx2_reset_dwrr_prio(parent, prio);
+
+ if (!parent->child_static_cnt)
+ parent->max_static_prio = 0;
/* create downstream txschq entries to parent */
err = otx2_qos_alloc_txschq_node(pfvf, parent);
@@ -1298,10 +1621,12 @@ void otx2_qos_config_txschq(struct otx2_nic *pfvf)
if (!root)
return;
- err = otx2_qos_txschq_config(pfvf, root);
- if (err) {
- netdev_err(pfvf->netdev, "Error update txschq configuration\n");
- goto root_destroy;
+ if (root->level != NIX_TXSCH_LVL_TL1) {
+ err = otx2_qos_txschq_config(pfvf, root);
+ if (err) {
+ netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
+ goto root_destroy;
+ }
}
err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
@@ -1334,7 +1659,8 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
htb->parent_classid,
htb->rate, htb->ceil,
- htb->prio, htb->extack);
+ htb->prio, htb->quantum,
+ htb->extack);
if (res < 0)
return res;
htb->qid = res;
@@ -1343,7 +1669,7 @@ int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
htb->classid, htb->rate,
htb->ceil, htb->prio,
- htb->extack);
+ htb->quantum, htb->extack);
case TC_HTB_LEAF_DEL:
return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
case TC_HTB_LEAF_DEL_LAST:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
index 19773284be27..221bd0438f60 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.h
@@ -35,6 +35,7 @@ struct otx2_qos_cfg {
int dwrr_node_pos[NIX_TXSCH_LVL_CNT];
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool schq_index_used[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
};
struct otx2_qos {
@@ -59,10 +60,18 @@ struct otx2_qos_node {
u64 ceil;
u32 classid;
u32 prio;
- u16 schq; /* hw txschq */
+ u32 quantum;
+ /* hw txschq */
+ u16 schq;
u16 qid;
u16 prio_anchor;
+ u16 max_static_prio;
+ u16 child_dwrr_cnt;
+ u16 child_static_cnt;
+ u16 child_dwrr_prio;
+ u16 txschq_idx; /* txschq allocation index */
u8 level;
+ bool is_static;
};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 3e20e71b0f81..8b9455d8a4f7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -202,16 +202,16 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
int err;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_META) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ICMP) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
- BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
}
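/*
 * Illustrative sketch (not part of the patch): the mask literal is now built
 * with BIT_ULL() rather than BIT(), presumably because used_keys can carry
 * dissector key indices at or above 32, where a 32-bit-wide shift would be
 * wrong.  Assumed standalone definitions, for illustration only:
 */
#include <stdint.h>
#include <stdio.h>

#define BIT32(n)   (1U << (n))		/* invalid once n reaches 32 */
#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
	int key = 33;	/* hypothetical dissector key index beyond 31 */

	printf("BIT_ULL(%d) = %#llx\n", key, (unsigned long long)BIT_ULL(key));
	printf("BIT32(%d)   would be undefined behaviour (shift >= width)\n", key);
	return 0;
}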
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 9277a8fd1339..cc2a9ae794be 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -5,9 +5,6 @@
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include "prestera_dsa.h"
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 7c487f9b36ec..07720841a8d7 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -32,7 +32,6 @@
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/mii.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/dmi.h>
@@ -4529,7 +4528,7 @@ static __init void sky2_debug_init(void)
struct dentry *ent;
ent = debugfs_create_dir("sky2", NULL);
- if (!ent || IS_ERR(ent))
+ if (IS_ERR(ent))
return;
sky2_debug = ent;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 317e447f4991..7c27a19c4d8f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -15,10 +15,10 @@
struct mtk_eth_muxc {
const char *name;
int cap_bit;
- int (*set_path)(struct mtk_eth *eth, int path);
+ int (*set_path)(struct mtk_eth *eth, u64 path);
};
-static const char *mtk_eth_path_name(int path)
+static const char *mtk_eth_path_name(u64 path)
{
switch (path) {
case MTK_ETH_PATH_GMAC1_RGMII:
@@ -40,10 +40,10 @@ static const char *mtk_eth_path_name(int path)
}
}
-static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path)
{
bool updated = true;
- u32 val, mask, set;
+ u32 mask, set, reg;
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
@@ -59,11 +59,13 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
break;
}
- if (updated) {
- val = mtk_r32(eth, MTK_MAC_MISC);
- val = (val & mask) | set;
- mtk_w32(eth, val, MTK_MAC_MISC);
- }
+ if (mtk_is_netsys_v3_or_greater(eth))
+ reg = MTK_MAC_MISC_V3;
+ else
+ reg = MTK_MAC_MISC;
+
+ if (updated)
+ mtk_m32(eth, mask, set, reg);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
@@ -71,7 +73,7 @@ static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@@ -94,7 +96,7 @@ static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0, mask = 0, reg = 0;
bool updated = true;
@@ -125,7 +127,7 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@@ -163,7 +165,7 @@ static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path)
return 0;
}
-static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path)
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
@@ -218,7 +220,7 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
},
};
-static int mtk_eth_mux_setup(struct mtk_eth *eth, int path)
+static int mtk_eth_mux_setup(struct mtk_eth *eth, u64 path)
{
int i, err = 0;
@@ -249,7 +251,7 @@ out:
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
- int path;
+ u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
@@ -260,7 +262,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
- int path = 0;
+ u64 path = 0;
if (mac_id == 1)
path = MTK_ETH_PATH_GMAC2_GEPHY;
@@ -274,7 +276,7 @@ int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
- int path;
+ u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
MTK_ETH_PATH_GMAC2_RGMII;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 2d15342c260a..6ad42e3b488f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -6,11 +6,12 @@
* Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
@@ -25,6 +26,7 @@
#include <linux/bitfield.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
+#include <net/page_pool/helpers.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -152,6 +154,54 @@ static const struct mtk_reg_map mt7986_reg_map = {
.pse_oq_sta = 0x01a0,
};
+static const struct mtk_reg_map mt7988_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6900,
+ .rx_cnt_cfg = 0x6904,
+ .pcrx_ptr = 0x6908,
+ .glo_cfg = 0x6a04,
+ .rst_idx = 0x6a08,
+ .delay_irq = 0x6a0c,
+ .irq_status = 0x6a20,
+ .irq_mask = 0x6a28,
+ .adma_rx_dbg0 = 0x6a38,
+ .int_grp = 0x6a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .qtx_sch = 0x4404,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ .tx_sch_rate = 0x4798,
+ },
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+};
+
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -179,10 +229,54 @@ static const struct mtk_ethtool_stats {
};
static const char * const mtk_clks_source_name[] = {
- "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
- "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
- "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
- "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
+ "ethif",
+ "sgmiitop",
+ "esw",
+ "gp0",
+ "gp1",
+ "gp2",
+ "gp3",
+ "xgp1",
+ "xgp2",
+ "xgp3",
+ "crypto",
+ "fe",
+ "trgpll",
+ "sgmii_tx250m",
+ "sgmii_rx250m",
+ "sgmii_cdr_ref",
+ "sgmii_cdr_fb",
+ "sgmii2_tx250m",
+ "sgmii2_rx250m",
+ "sgmii2_cdr_ref",
+ "sgmii2_cdr_fb",
+ "sgmii_ck",
+ "eth2pll",
+ "wocpu0",
+ "wocpu1",
+ "netsys0",
+ "netsys1",
+ "ethwarp_wocpu2",
+ "ethwarp_wocpu1",
+ "ethwarp_wocpu0",
+ "top_usxgmii0_sel",
+ "top_usxgmii1_sel",
+ "top_sgm0_sel",
+ "top_sgm1_sel",
+ "top_xfi_phy0_xtal_sel",
+ "top_xfi_phy1_xtal_sel",
+ "top_eth_gmii_sel",
+ "top_eth_refck_50m_sel",
+ "top_eth_sys_200m_sel",
+ "top_eth_sys_sel",
+ "top_eth_xgmii_sel",
+ "top_eth_mii_sel",
+ "top_netsys_sel",
+ "top_netsys_500m_sel",
+ "top_netsys_pao_2x_sel",
+ "top_netsys_sync_250m_sel",
+ "top_netsys_ppefb_250m_sel",
+ "top_netsys_warp_sel",
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -195,7 +289,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
return __raw_readl(eth->base + reg);
}
-static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
u32 val;
@@ -385,10 +479,8 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
}
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
- phy_interface_t interface, int speed)
+ phy_interface_t interface)
{
- unsigned long rate;
- u32 tck, rck, intf;
int ret;
if (interface == PHY_INTERFACE_MODE_TRGMII) {
@@ -399,30 +491,20 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
return;
}
- if (speed == SPEED_1000) {
- intf = INTF_MODE_RGMII_1000;
- rate = 250000000;
- rck = RCK_CTRL_RGMII_1000;
- tck = TCK_CTRL_RGMII_1000;
- } else {
- intf = INTF_MODE_RGMII_10_100;
- rate = 500000000;
- rck = RCK_CTRL_RGMII_10_100;
- tck = TCK_CTRL_RGMII_10_100;
- }
-
- mtk_w32(eth, intf, INTF_MODE);
-
- regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
- ETHSYS_TRGMII_CLK_SEL362_5,
- ETHSYS_TRGMII_CLK_SEL362_5);
+ dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
+}
- ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate);
- if (ret)
- dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+static void mtk_setup_bridge_switch(struct mtk_eth *eth)
+{
+ /* Force Port1 XGMAC Link Up */
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ MTK_XGMAC_STS(MTK_GMAC1_ID));
- mtk_w32(eth, rck, TRGMII_RCK_CTRL);
- mtk_w32(eth, tck, TRGMII_TCK_CTRL);
+ /* Adjust GSW bridge IPG to 11 */
+ mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
+ (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
+ (GSW_IPG_11 << GSWRX_IPG_SHIFT),
+ MTK_GSW_CFG);
}
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
@@ -484,6 +566,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
goto init_err;
}
break;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ break;
default:
goto err_phy;
}
@@ -498,17 +582,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
- /* FIXME: this is incorrect. Not only does it
- * use state->speed (which is not guaranteed
- * to be correct) but it also makes use of it
- * in a code path that will only be reachable
- * when the PHY interface mode changes, not
- * when the speed changes. Consequently, RGMII
- * is probably broken.
- */
mtk_gmac0_rgmii_adjust(mac->hw,
- state->interface,
- state->speed);
+ state->interface);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -562,6 +637,15 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
return;
}
+ /* Setup gmac */
+ if (mtk_is_netsys_v3_or_greater(eth) &&
+ mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
+ mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
+
+ mtk_setup_bridge_switch(eth);
+ }
+
return;
err_phy:
@@ -602,38 +686,6 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
return 0;
}
-static void mtk_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
- u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
-
- state->link = (pmsr & MAC_MSR_LINK);
- state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
-
- switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
- case 0:
- state->speed = SPEED_10;
- break;
- case MAC_MSR_SPEED_100:
- state->speed = SPEED_100;
- break;
- case MAC_MSR_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- break;
- }
-
- state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
- if (pmsr & MAC_MSR_RX_FC)
- state->pause |= MLO_PAUSE_RX;
- if (pmsr & MAC_MSR_TX_FC)
- state->pause |= MLO_PAUSE_TX;
-}
-
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -659,7 +711,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
if (IS_ENABLED(CONFIG_SOC_MT7621)) {
@@ -756,7 +808,6 @@ static void mtk_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops mtk_phylink_ops = {
.mac_select_pcs = mtk_mac_select_pcs,
- .mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
@@ -807,11 +858,15 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+ /* Configure MDC Turbo Mode */
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+
/* Configure MDC Divider */
- val = mtk_r32(eth, MTK_PPSC);
- val &= ~PPSC_MDC_CFG;
- val |= FIELD_PREP(PPSC_MDC_CFG, divider) | PPSC_MDC_TURBO;
- mtk_w32(eth, val, MTK_PPSC);
+ val = FIELD_PREP(PPSC_MDC_CFG, divider);
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ val |= PPSC_MDC_TURBO;
+ mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
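/*
 * Illustrative sketch (not part of the patch): the MDC divider is the
 * smallest integer that brings MDC_MAX_FREQ down to at or below the
 * requested clock, clamped to 63.  The 25 MHz value for MDC_MAX_FREQ below
 * is an assumption made only for the numbers:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mdc_max_freq = 25000000;	/* assumed */
	unsigned int max_clk = 2500000;		/* requested 2.5 MHz MDC */
	unsigned int divider = DIV_ROUND_UP(mdc_max_freq, max_clk);

	if (divider > 63)
		divider = 63;

	printf("divider=%u, MDC runs at %u Hz\n",
	       divider, mdc_max_freq / divider);	/* divider=10, 2500000 Hz */
	return 0;
}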
@@ -943,17 +998,32 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
- hw_stats->tx_skip +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
- hw_stats->tx_collisions +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
- hw_stats->tx_bytes +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
- stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets +=
- mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
+ } else {
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
+ }
}
u64_stats_update_end(&hw_stats->syncp);
@@ -963,7 +1033,7 @@ static void mtk_stats_update(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->mac[i] || !eth->mac[i]->hw_stats)
continue;
if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
@@ -1037,7 +1107,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1065,10 +1135,13 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
- &eth->phy_scratch_ring,
- GFP_KERNEL);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
+ eth->scratch_ring = eth->sram_base;
+ else
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+ cnt * soc->txrx.txd_size,
+ &eth->phy_scratch_ring,
+ GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
@@ -1095,7 +1168,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
- if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@@ -1255,9 +1328,25 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
data = TX_DMA_PLEN0(info->size);
if (info->last)
data |= TX_DMA_LS0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ data |= TX_DMA_PREP_ADDR64(info->addr);
+
WRITE_ONCE(desc->txd3, data);
- data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+ /* set forward port */
+ switch (mac->id) {
+ case MTK_GMAC1_ID:
+ data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ case MTK_GMAC2_ID:
+ data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ case MTK_GMAC3_ID:
+ data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ }
+
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
@@ -1268,6 +1357,8 @@ static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
/* tx checksum offload */
if (info->csum)
data |= TX_DMA_CHKSUM_V2;
+ if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
+ data |= TX_DMA_SPTAG_V3;
}
WRITE_ONCE(desc->txd5, data);
@@ -1286,7 +1377,7 @@ static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
mtk_tx_set_dma_desc_v2(dev, txd, info);
else
mtk_tx_set_dma_desc_v1(dev, txd, info);
@@ -1333,8 +1424,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, itxd, &txd_info);
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
- MTK_TX_FLAGS_FPORT1;
+ itx_buf->mac_id = mac->id;
setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
@@ -1382,8 +1472,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
- MTK_TX_FLAGS_FPORT1;
+ tx_buf->mac_id = mac->id;
setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
txd_info.size, k++);
@@ -1468,7 +1557,7 @@ static int mtk_queue_stopped(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
if (netif_queue_stopped(eth->netdev[i]))
@@ -1482,7 +1571,7 @@ static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
netif_tx_wake_all_queues(eth->netdev[i]);
@@ -1593,7 +1682,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
+ return mtk_is_netsys_v2_or_greater(eth);
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
@@ -1685,7 +1774,7 @@ static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
}
mtk_tx_set_dma_desc(dev, txd, txd_info);
- tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->mac_id = mac->id;
tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1912,6 +2001,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
bool xdp_flush = false;
int idx;
struct sk_buff *skb;
+ u64 addr64 = 0;
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
@@ -1935,13 +2025,26 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
break;
/* find out which mac the packet comes from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
- mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
- else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
- !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+
+ switch (val) {
+ case PSE_GDM1_PORT:
+ case PSE_GDM2_PORT:
+ mac = val - 1;
+ break;
+ case PSE_GDM3_PORT:
+ mac = MTK_GMAC3_ID;
+ break;
+ default:
+ break;
+ }
+ } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
+ }
- if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+ if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
!eth->netdev[mac]))
goto release_desc;
@@ -2014,7 +2117,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
}
- dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
+
+ dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
ring->buf_size, DMA_FROM_DEVICE);
skb = build_skb(data, ring->frag_size);
@@ -2031,7 +2137,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -2056,8 +2162,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
- (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) {
+ if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
+ netdev_uses_dsa(netdev)) {
unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
@@ -2080,6 +2186,9 @@ release_desc:
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+
ring->calc_idx = idx;
done++;
}
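/*
 * Illustrative sketch (not part of the patch): on 36-bit DMA parts the low 32
 * address bits stay in rxd1 (or txd1) while the upper bits travel in a small
 * field of rxd2/txd3; RX_DMA_GET_ADDR64() appears to hand them back already
 * shifted into bits 35:32 so they can simply be OR'd with rxd1.  The 4-bit
 * field placement below is an assumption made only for illustration:
 */
#include <stdint.h>
#include <stdio.h>

#define ADDR_HI_SHIFT	0	/* assumed position of the hi bits inside rxd2 */
#define ADDR_HI_MASK	0xfu

static uint32_t prep_addr64(uint64_t addr)
{
	return ((addr >> 32) & ADDR_HI_MASK) << ADDR_HI_SHIFT;
}

static uint64_t get_addr64(uint32_t rxd2)
{
	return (uint64_t)((rxd2 >> ADDR_HI_SHIFT) & ADDR_HI_MASK) << 32;
}

int main(void)
{
	uint64_t dma_addr = 0x3abcd0000ULL;		/* 36-bit address  */
	uint32_t rxd1 = (uint32_t)dma_addr;		/* low 32 bits     */
	uint32_t rxd2 = prep_addr64(dma_addr);		/* upper bits only */

	printf("reassembled: %#llx\n",
	       (unsigned long long)((uint64_t)rxd1 | get_addr64(rxd2)));
	/* prints 0x3abcd0000 */
	return 0;
}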
@@ -2161,7 +2270,6 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
- int mac = 0;
desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
@@ -2169,15 +2277,13 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
tx_buf = mtk_desc_to_tx_buf(ring, desc,
eth->soc->txrx.txd_size);
- if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
- mac = 1;
-
if (!tx_buf->data)
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
if (tx_buf->type == MTK_TYPE_SKB)
- mtk_poll_tx_done(eth, state, mac, tx_buf->data);
+ mtk_poll_tx_done(eth, state, tx_buf->mac_id,
+ tx_buf->data);
budget--;
}
@@ -2354,8 +2460,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
- &ring->phys, GFP_KERNEL);
+ if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
+ ring->dma = eth->sram_base + ring_size * sz;
+ ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+ } else {
+ ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ &ring->phys, GFP_KERNEL);
+ }
+
if (!ring->dma)
goto no_tx_mem;
@@ -2367,7 +2479,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
- if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
@@ -2420,14 +2532,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
ofs += MTK_QTX_OFFSET;
}
val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
@@ -2454,8 +2566,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
kfree(ring->buf);
ring->buf = NULL;
}
-
- if (ring->dma) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma_size * soc->txrx.txd_size,
ring->dma, ring->phys);
@@ -2474,9 +2585,14 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_rx_ring *ring;
- int rx_data_len, rx_dma_size;
+ int rx_data_len, rx_dma_size, tx_ring_size;
int i;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ tx_ring_size = MTK_QDMA_RING_SIZE;
+ else
+ tx_ring_size = MTK_DMA_SIZE;
+
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
@@ -2511,9 +2627,20 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
ring->page_pool = pp;
}
- ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ rx_flag != MTK_RX_FLAGS_NORMAL) {
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+ rx_dma_size * eth->soc->txrx.rxd_size,
+ &ring->phys, GFP_KERNEL);
+ } else {
+ struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+
+ ring->dma = tx_ring->dma + tx_ring_size *
+ eth->soc->txrx.txd_size * (ring_no + 1);
+ ring->phys = tx_ring->phys + tx_ring_size *
+ eth->soc->txrx.txd_size * (ring_no + 1);
+ }
+
if (!ring->dma)
return -ENOMEM;
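/*
 * Illustrative sketch (not part of the patch): when the SoC provides SRAM for
 * the DMA descriptors, the rings are carved out of one contiguous region -
 * the FQ scratch ring sits at sram_base, the QDMA TX ring follows it, and
 * each PDMA RX ring follows at a further ring-size stride.  The base address
 * and sizes below are made-up placeholders:
 */
#include <stdio.h>

int main(void)
{
	unsigned long sram_base = 0x10000000;	/* assumed SRAM base        */
	unsigned int ring_size  = 2048;		/* assumed descriptor count */
	unsigned int desc_size  = 32;		/* assumed descriptor bytes */
	unsigned long stride    = (unsigned long)ring_size * desc_size;

	unsigned long scratch = sram_base;		/* FQ scratch ring */
	unsigned long tx_ring = scratch + stride;	/* QDMA TX ring    */
	unsigned long rx0     = tx_ring + stride;	/* PDMA RX ring 0  */
	unsigned long rx1     = rx0     + stride;	/* PDMA RX ring 1  */

	printf("scratch %#lx  tx %#lx  rx0 %#lx  rx1 %#lx\n",
	       scratch, tx_ring, rx0, rx1);
	return 0;
}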
@@ -2554,9 +2681,12 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -2598,8 +2728,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return 0;
}
-static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
+static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
{
+ u64 addr64 = 0;
int i;
if (ring->data && ring->dma) {
@@ -2613,7 +2744,10 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (!rxd->rxd1)
continue;
- dma_unmap_single(eth->dma_dev, rxd->rxd1,
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
+
+ dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
ring->buf_size, DMA_FROM_DEVICE);
mtk_rx_put_buff(ring, ring->data[i], false);
}
@@ -2621,7 +2755,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
ring->data = NULL;
}
- if (ring->dma) {
+ if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma_size * eth->soc->txrx.rxd_size,
ring->dma, ring->phys);
@@ -2978,10 +3112,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
const struct mtk_soc_data *soc = eth->soc;
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++)
+ for (i = 0; i < MTK_MAX_DEVS; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
- if (eth->scratch_ring) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
eth->scratch_ring, eth->phy_scratch_ring);
@@ -2989,13 +3123,13 @@ static void mtk_dma_free(struct mtk_eth *eth)
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
- mtk_rx_clean(eth, &eth->rx_ring[0]);
- mtk_rx_clean(eth, &eth->rx_ring_qdma);
+ mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
+ mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
if (eth->hwlro) {
mtk_hwlro_rx_uninit(eth);
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
- mtk_rx_clean(eth, &eth->rx_ring[i]);
+ mtk_rx_clean(eth, &eth->rx_ring[i], false);
}
kfree(eth->scratch_head);
@@ -3104,7 +3238,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
@@ -3132,8 +3266,13 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ u32 val;
+
+ if (!eth->netdev[i])
+ continue;
+
+ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
/* default setup the forward port to send frame to PDMA */
val &= ~0xffff;
@@ -3143,7 +3282,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
- if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
+ if (netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@@ -3250,7 +3389,7 @@ static int mtk_open(struct net_device *dev)
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return 0;
if (mtk_uses_dsa(dev) && !eth->prog) {
@@ -3516,19 +3655,34 @@ static void mtk_hw_reset(struct mtk_eth *eth)
{
u32 val;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ val = RSTCTRL_PPE0_V3;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1_V3;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ val |= RSTCTRL_PPE2;
+
+ val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
+ } else if (mtk_is_netsys_v2_or_greater(eth)) {
val = RSTCTRL_PPE0_V2;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
} else {
val = RSTCTRL_PPE0;
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- val |= RSTCTRL_PPE1;
-
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v3_or_greater(eth))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x6f8ff);
+ else if (mtk_is_netsys_v2_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x3ffffff);
}
@@ -3554,13 +3708,21 @@ static void mtk_hw_warm_reset(struct mtk_eth *eth)
return;
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1_V3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ rst_mask |= RSTCTRL_PPE2;
+
+ rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
+ } else if (mtk_is_netsys_v2_or_greater(eth)) {
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
- else
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1;
+ } else {
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- rst_mask |= RSTCTRL_PPE1;
+ }
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
@@ -3724,7 +3886,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3745,15 +3907,15 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
* up with the more appropriate value when mtk_mac_config call is being
* invoked.
*/
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
struct net_device *dev = eth->netdev[i];
- mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
- if (dev) {
- struct mtk_mac *mac = netdev_priv(dev);
+ if (!dev)
+ continue;
- mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
- }
+ mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+ mtk_set_mcr_max_rx(netdev_priv(dev),
+ dev->mtu + MTK_RX_ETH_HLEN);
}
/* Indicates CDM to parse the MTK special tag from CPU
@@ -3761,7 +3923,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v1(eth)) {
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
@@ -3783,7 +3945,24 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ /* PSE should not drop port1, port8 and port9 packets */
+ mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
+
+ /* Disable GDM1 RX CRC stripping */
+ mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
+
+ /* The PSE GDM3 MIB counters have incorrect hw default values,
+ * so the driver ought to read-clear them beforehand, in case
+ * ethtool would otherwise retrieve wrong MIB values.
+ */
+ for (i = 0; i < 0x80; i += 0x4)
+ mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
+ } else if (!mtk_is_netsys_v1(eth)) {
/* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
@@ -3895,11 +4074,17 @@ static void mtk_prepare_for_reset(struct mtk_eth *eth)
u32 val;
int i;
- /* disabe FE P3 and P4 */
- val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- val |= MTK_FE_LINK_DOWN_P4;
- mtk_w32(eth, val, MTK_FE_GLO_CFG);
+ /* set FE PPE ports link down */
+ for (i = MTK_GMAC1_ID;
+ i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
+ i += 2) {
+ val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
+ mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
+ }
/* adjust PPE configurations to prepare for reset */
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
@@ -3933,7 +4118,7 @@ static void mtk_pending_work(struct work_struct *work)
mtk_prepare_for_reset(eth);
/* stop all devices to make sure that dma is properly shut down */
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
@@ -3949,8 +4134,8 @@ static void mtk_pending_work(struct work_struct *work)
mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!test_bit(i, &restart))
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i] || !test_bit(i, &restart))
continue;
if (mtk_open(eth->netdev[i])) {
@@ -3960,11 +4145,18 @@ static void mtk_pending_work(struct work_struct *work)
}
}
- /* enabe FE P3 and P4 */
- val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- val &= ~MTK_FE_LINK_DOWN_P4;
- mtk_w32(eth, val, MTK_FE_GLO_CFG);
+ /* set FE PPE ports link up */
+ for (i = MTK_GMAC1_ID;
+ i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
+ i += 2) {
+ val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
+
+ mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
+ }
clear_bit(MTK_RESETTING, &eth->state);
@@ -3977,7 +4169,7 @@ static int mtk_free_dev(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
free_netdev(eth->netdev[i]);
@@ -3996,7 +4188,7 @@ static int mtk_unreg_dev(struct mtk_eth *eth)
{
int i;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
@@ -4298,7 +4490,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
id = be32_to_cpup(_id);
- if (id >= MTK_MAC_COUNT) {
+ if (id >= MTK_MAX_DEVS) {
dev_err(eth->dev, "%d is not a valid mac id\n", id);
return -EINVAL;
}
@@ -4346,7 +4538,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
spin_lock_init(&mac->hw_stats->stats_lock);
u64_stats_init(&mac->hw_stats->syncp);
- mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mac->hw_stats->reg_offset = id * 0x80;
+ else
+ mac->hw_stats->reg_offset = id * 0x40;
/* phylink create */
err = of_get_phy_mode(np, &phy_mode);
@@ -4361,18 +4557,22 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
- /* This driver makes use of state->speed in mac_config */
- mac->phylink_config.legacy_pre_march2020 = true;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
- __set_bit(PHY_INTERFACE_MODE_MII,
- mac->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_GMII,
- mac->phylink_config.supported_interfaces);
+ /* MT7623 gmac0 is now missing its speed-specific PLL configuration
+ * in its .mac_config method (since state->speed is not valid there).
+ * Disable support for MII, GMII and RGMII.
+ */
+ if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
- phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+ }
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
__set_bit(PHY_INTERFACE_MODE_TRGMII,
@@ -4396,6 +4596,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.supported_interfaces);
}
+ if (mtk_is_netsys_v3_or_greater(mac->hw) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ id == MTK_GMAC1_ID) {
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE |
+ MAC_10000FD;
+ phy_interface_zero(mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+ }
+
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
@@ -4454,7 +4665,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
rtnl_lock();
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
dev = eth->netdev[i];
if (!dev || !(dev->flags & IFF_UP))
@@ -4507,7 +4718,7 @@ static int mtk_sgmii_init(struct mtk_eth *eth)
static int mtk_probe(struct platform_device *pdev)
{
- struct resource *res = NULL;
+ struct resource *res = NULL, *res_sram;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -4527,6 +4738,28 @@ static int mtk_probe(struct platform_device *pdev)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
+ /* SRAM is actual memory and supports transparent access just like DRAM.
+ * Hence we don't require __iomem being set and don't need to use accessor
+ * functions to read from or write to SRAM.
+ */
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(eth->sram_base))
+ return PTR_ERR(eth->sram_base);
+ } else {
+ eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
+ }
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ if (err) {
+ dev_err(&pdev->dev, "Wrong DMA config\n");
+ return -EINVAL;
+ }
+ }
+
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
spin_lock_init(&eth->rx_irq_lock);
@@ -4584,12 +4817,24 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
goto err_destroy_sgmii;
}
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_sram) {
+ err = -EINVAL;
+ goto err_destroy_sgmii;
+ }
+ eth->phy_scratch_ring = res_sram->start;
+ } else {
+ eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
+ }
+ }
}
if (eth->soc->offload_version) {
@@ -4692,9 +4937,8 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- u32 num_ppe;
+ u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
- num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
@@ -4761,7 +5005,7 @@ static int mtk_remove(struct platform_device *pdev)
int i;
/* stop all devices to make sure that dma is properly shut down */
- for (i = 0; i < MTK_MAC_COUNT; i++) {
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
mtk_stop(eth->netdev[i]);
@@ -4786,6 +5030,7 @@ static const struct mtk_soc_data mt2701_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4802,9 +5047,10 @@ static const struct mtk_soc_data mt7621_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.offload_version = 1,
.hash_offset = 2,
- .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4822,10 +5068,11 @@ static const struct mtk_soc_data mt7622_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.offload_version = 2,
.hash_offset = 2,
.has_accounting = true,
- .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4842,9 +5089,11 @@ static const struct mtk_soc_data mt7623_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .version = 1,
.offload_version = 1,
.hash_offset = 2,
- .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .disable_pll_modes = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4863,6 +5112,7 @@ static const struct mtk_soc_data mt7629_data = {
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
.has_accounting = true,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4880,10 +5130,11 @@ static const struct mtk_soc_data mt7981_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7981_CLKS_BITMAP,
.required_pctl = false,
+ .version = 2,
.offload_version = 2,
.hash_offset = 4,
- .foe_entry_size = sizeof(struct mtk_foe_entry),
.has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
@@ -4901,10 +5152,33 @@ static const struct mtk_soc_data mt7986_data = {
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
+ .version = 2,
+ .offload_version = 2,
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
+static const struct mtk_soc_data mt7988_data = {
+ .reg_map = &mt7988_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7988_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7988_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 3,
.offload_version = 2,
.hash_offset = 4,
- .foe_entry_size = sizeof(struct mtk_foe_entry),
.has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
@@ -4921,6 +5195,7 @@ static const struct mtk_soc_data rt5350_data = {
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
+ .version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4932,14 +5207,15 @@ static const struct mtk_soc_data rt5350_data = {
};
const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
- { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
- { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
- { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
- { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
- { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
- { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
- { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
+ { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
+ { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
+ { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
+ { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
+ { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
+ { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
+ { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 707445f6bcb1..403219d987ef 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -18,7 +18,7 @@
#include <linux/rhashtable.h>
#include <linux/dim.h>
#include <linux/bitfield.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
@@ -33,7 +33,6 @@
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
#define MTK_DMA_SIZE 512
-#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -77,9 +76,8 @@
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
/* Frame Engine Global Configuration */
-#define MTK_FE_GLO_CFG 0x00
-#define MTK_FE_LINK_DOWN_P3 BIT(11)
-#define MTK_FE_LINK_DOWN_P4 BIT(12)
+#define MTK_FE_GLO_CFG(x) (((x) == MTK_GMAC3_ID) ? 0x24 : 0x00)
+#define MTK_FE_LINK_DOWN_P(x) BIT(((x) + 8) % 16)
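With the PSE port IDs defined later in this header, the two macros above are expected to expand as follows (illustrative only):

/* PSE_PPE0_PORT = 3, PSE_PPE1_PORT = 4, PSE_PPE2_PORT = 12:
 *
 *   MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT) = BIT((3 + 8) % 16)  = BIT(11)  (old MTK_FE_LINK_DOWN_P3)
 *   MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT) = BIT((4 + 8) % 16)  = BIT(12)  (old MTK_FE_LINK_DOWN_P4)
 *   MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT) = BIT((12 + 8) % 16) = BIT(4)
 *
 *   MTK_FE_GLO_CFG(MTK_GMAC1_ID) = 0x00, MTK_FE_GLO_CFG(MTK_GMAC3_ID) = 0x24
 */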
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
@@ -118,19 +116,31 @@
#define MTK_CDMP_EG_CTRL 0x404
/* GDM Exgress Control Register */
-#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
+#define MTK_GDMA_FWD_CFG(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x540 : 0x500 + (_x * 0x1000); })
#define MTK_GDMA_SPECIAL_TAG BIT(24)
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_STRP_CRC BIT(16)
#define MTK_GDMA_TO_PDMA 0x0
#define MTK_GDMA_DROP_ALL 0x7777
+/* GDM Egress Control Register */
+#define MTK_GDMA_EG_CTRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x544 : 0x504 + (_x * 0x1000); })
+#define MTK_GDMA_XGDM_SEL BIT(31)
+
/* Unicast Filter MAC Address Register - Low */
-#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
+#define MTK_GDMA_MAC_ADRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x548 : 0x508 + (_x * 0x1000); })
/* Unicast Filter MAC Address Register - High */
-#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x54C : 0x50C + (_x * 0x1000); })
+
+/* Internal SRAM offset */
+#define MTK_ETH_SRAM_OFFSET 0x40000
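For reference, the per-MAC GDM register macros above should resolve to the offsets below (GMAC IDs as defined later in this header); GDM3 does not follow the usual 0x1000 stride:

/* MTK_GDMA_FWD_CFG(MTK_GMAC1_ID) = 0x500    MTK_GDMA_MAC_ADRL(MTK_GMAC1_ID) = 0x508
 * MTK_GDMA_FWD_CFG(MTK_GMAC2_ID) = 0x1500   MTK_GDMA_MAC_ADRL(MTK_GMAC2_ID) = 0x1508
 * MTK_GDMA_FWD_CFG(MTK_GMAC3_ID) = 0x540    MTK_GDMA_MAC_ADRL(MTK_GMAC3_ID) = 0x548
 */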
/* FE global misc reg*/
#define MTK_FE_GLO_MISC 0x124
@@ -288,8 +298,6 @@
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)
-#define MTK_STAT_OFFSET 0x40
-
/* QDMA TX NUM */
#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
#define MTK_QDMA_GMAC2_QID 8
@@ -302,6 +310,8 @@
#define TX_DMA_CHKSUM_V2 (0x7 << 28)
#define TX_DMA_TSO_V2 BIT(31)
+#define TX_DMA_SPTAG_V3 BIT(27)
+
/* QDMA V2 descriptor txd4 */
#define TX_DMA_FPORT_SHIFT_V2 8
#define TX_DMA_FPORT_MASK_V2 0xf
@@ -321,6 +331,14 @@
#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
+#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
+#if IS_ENABLED(CONFIG_64BIT)
+# define TX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(TX_DMA_ADDR64_MASK, (x))) << 32)
+# define TX_DMA_PREP_ADDR64(x) FIELD_PREP(TX_DMA_ADDR64_MASK, ((x) >> 32))
+#else
+# define TX_DMA_GET_ADDR64(x) (0)
+# define TX_DMA_PREP_ADDR64(x) (0)
+#endif
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -333,6 +351,14 @@
#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
+#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
+#if IS_ENABLED(CONFIG_64BIT)
+# define RX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(RX_DMA_ADDR64_MASK, (x))) << 32)
+# define RX_DMA_PREP_ADDR64(x) FIELD_PREP(RX_DMA_ADDR64_MASK, ((x) >> 32))
+#else
+# define RX_DMA_GET_ADDR64(x) (0)
+# define RX_DMA_PREP_ADDR64(x) (0)
+#endif
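A minimal sketch, using hypothetical helpers not present in this patch, of how a 36-bit DMA address would be split between the 32-bit address word and the new ADDR64 field of the following descriptor word:

/* Hypothetical illustration only */
static inline void mtk_rxd_set_addr(struct mtk_rx_dma_v2 *rxd, dma_addr_t addr)
{
	rxd->rxd1 = lower_32_bits(addr);		/* bits 31..0 */
	rxd->rxd2 |= RX_DMA_PREP_ADDR64(addr);		/* bits 35..32 into rxd2[3:0] */
}

static inline dma_addr_t mtk_rxd_get_addr(const struct mtk_rx_dma_v2 *rxd)
{
	return (u64)rxd->rxd1 | RX_DMA_GET_ADDR64(rxd->rxd2);
}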
/* QDMA descriptor rxd3 */
#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
@@ -389,7 +415,26 @@
#define PHY_IAC_TIMEOUT HZ
#define MTK_MAC_MISC 0x1000c
+#define MTK_MAC_MISC_V3 0x10010
#define MTK_MUX_TO_ESW BIT(0)
+#define MISC_MDC_TURBO BIT(4)
+
+/* XMAC status registers */
+#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_USXGMII_PCS_LINK BIT(8)
+#define MTK_XGMAC_RX_FC BIT(5)
+#define MTK_XGMAC_TX_FC BIT(4)
+#define MTK_USXGMII_PCS_MODE GENMASK(3, 1)
+#define MTK_XGMAC_LINK_STS BIT(0)
+
+/* GSW bridge registers */
+#define MTK_GSW_CFG (0x10080)
+#define GSWTX_IPG_MASK GENMASK(19, 16)
+#define GSWTX_IPG_SHIFT 16
+#define GSWRX_IPG_MASK GENMASK(3, 0)
+#define GSWRX_IPG_SHIFT 0
+#define GSW_IPG_11 11
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
@@ -478,7 +523,7 @@
#define ETHSYS_SYSCFG0 0x14
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
-#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
+#define SYSCFG0_SGMII_MASK GENMASK(9, 7)
#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
@@ -495,9 +540,15 @@
/* ethernet reset control register */
#define ETHSYS_RSTCTRL 0x34
#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_WDMA0 BIT(24)
+#define RSTCTRL_WDMA1 BIT(25)
+#define RSTCTRL_WDMA2 BIT(26)
#define RSTCTRL_PPE0 BIT(31)
#define RSTCTRL_PPE0_V2 BIT(30)
#define RSTCTRL_PPE1 BIT(31)
+#define RSTCTRL_PPE0_V3 BIT(29)
+#define RSTCTRL_PPE1_V3 BIT(30)
+#define RSTCTRL_PPE2 BIT(31)
#define RSTCTRL_ETH BIT(23)
/* ethernet reset check idle register */
@@ -635,12 +686,6 @@ enum mtk_tx_flags {
*/
MTK_TX_FLAGS_SINGLE0 = 0x01,
MTK_TX_FLAGS_PAGE0 = 0x02,
-
- /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
- * SKB out instead of looking up through hardware TX descriptor.
- */
- MTK_TX_FLAGS_FPORT0 = 0x04,
- MTK_TX_FLAGS_FPORT1 = 0x08,
};
/* This enum allows us to identify how the clock is defined on the array of the
@@ -653,6 +698,11 @@ enum mtk_clks_map {
MTK_CLK_GP0,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_GP3,
+ MTK_CLK_XGP1,
+ MTK_CLK_XGP2,
+ MTK_CLK_XGP3,
+ MTK_CLK_CRYPTO,
MTK_CLK_FE,
MTK_CLK_TRGPLL,
MTK_CLK_SGMII_TX_250M,
@@ -669,63 +719,145 @@ enum mtk_clks_map {
MTK_CLK_WOCPU1,
MTK_CLK_NETSYS0,
MTK_CLK_NETSYS1,
+ MTK_CLK_ETHWARP_WOCPU2,
+ MTK_CLK_ETHWARP_WOCPU1,
+ MTK_CLK_ETHWARP_WOCPU0,
+ MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
+ MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
+ MTK_CLK_TOP_SGM_0_SEL,
+ MTK_CLK_TOP_SGM_1_SEL,
+ MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
+ MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
+ MTK_CLK_TOP_ETH_GMII_SEL,
+ MTK_CLK_TOP_ETH_REFCK_50M_SEL,
+ MTK_CLK_TOP_ETH_SYS_200M_SEL,
+ MTK_CLK_TOP_ETH_SYS_SEL,
+ MTK_CLK_TOP_ETH_XGMII_SEL,
+ MTK_CLK_TOP_ETH_MII_SEL,
+ MTK_CLK_TOP_NETSYS_SEL,
+ MTK_CLK_TOP_NETSYS_500M_SEL,
+ MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
+ MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
+ MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
+ MTK_CLK_TOP_NETSYS_WARP_SEL,
MTK_CLK_MAX
};
-#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
- BIT(MTK_CLK_TRGPLL))
-#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_GP2) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK) | \
- BIT(MTK_CLK_ETH2PLL))
+#define MT7623_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_TRGPLL))
+#define MT7622_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
#define MT7628_CLKS_BITMAP (0)
-#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII2_TX_250M) | \
- BIT(MTK_CLK_SGMII2_RX_250M) | \
- BIT(MTK_CLK_SGMII2_CDR_REF) | \
- BIT(MTK_CLK_SGMII2_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK) | \
- BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
-#define MT7981_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_WOCPU0) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII2_TX_250M) | \
- BIT(MTK_CLK_SGMII2_RX_250M) | \
- BIT(MTK_CLK_SGMII2_CDR_REF) | \
- BIT(MTK_CLK_SGMII2_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK))
-#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII2_TX_250M) | \
- BIT(MTK_CLK_SGMII2_RX_250M) | \
- BIT(MTK_CLK_SGMII2_CDR_REF) | \
- BIT(MTK_CLK_SGMII2_CDR_FB))
+#define MT7629_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
+#define MT7981_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK))
+#define MT7986_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
+#define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
+ BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
+ BIT_ULL(MTK_CLK_CRYPTO) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
+ BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
};
+/* PSE Port Definition */
+enum mtk_pse_port {
+ PSE_ADMA_PORT = 0,
+ PSE_GDM1_PORT,
+ PSE_GDM2_PORT,
+ PSE_PPE0_PORT,
+ PSE_PPE1_PORT,
+ PSE_QDMA_TX_PORT,
+ PSE_QDMA_RX_PORT,
+ PSE_DROP_PORT,
+ PSE_WDMA0_PORT,
+ PSE_WDMA1_PORT,
+ PSE_TDMA_PORT,
+ PSE_NONE_PORT,
+ PSE_PPE2_PORT,
+ PSE_WDMA2_PORT,
+ PSE_EIP197_PORT,
+ PSE_GDM3_PORT,
+ PSE_PORT_MAX
+};
+
+/* GMAC Identifier */
+enum mtk_gmac_id {
+ MTK_GMAC1_ID = 0,
+ MTK_GMAC2_ID,
+ MTK_GMAC3_ID,
+ MTK_GMAC_ID_MAX
+};
+
enum mtk_tx_buf_type {
MTK_TYPE_SKB,
MTK_TYPE_XDP_TX,
@@ -744,7 +876,8 @@ struct mtk_tx_buf {
enum mtk_tx_buf_type type;
void *data;
- u32 flags;
+ u16 mac_id;
+ u16 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
DEFINE_DMA_UNMAP_ADDR(dma_addr1);
@@ -820,10 +953,12 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
- MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
MTK_RSTCTRL_PPE1_BIT,
+ MTK_RSTCTRL_PPE2_BIT,
MTK_U3_COPHY_V2_BIT,
+ MTK_SRAM_BIT,
+ MTK_36BIT_DMA_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -843,42 +978,44 @@ enum mkt_eth_capabilities {
};
/* Supported hardware group on SoCs */
-#define MTK_RGMII BIT(MTK_RGMII_BIT)
-#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
-#define MTK_SGMII BIT(MTK_SGMII_BIT)
-#define MTK_ESW BIT(MTK_ESW_BIT)
-#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
-#define MTK_MUX BIT(MTK_MUX_BIT)
-#define MTK_INFRA BIT(MTK_INFRA_BIT)
-#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
-#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
-#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
-#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
-#define MTK_QDMA BIT(MTK_QDMA_BIT)
-#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
-#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
-#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
-#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT)
+#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
+#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
+#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
+#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
+#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
+#define MTK_INFRA BIT_ULL(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT_ULL(MTK_QDMA_BIT)
+#define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
+#define MTK_RSTCTRL_PPE2 BIT_ULL(MTK_RSTCTRL_PPE2_BIT)
+#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
+#define MTK_SRAM BIT_ULL(MTK_SRAM_BIT)
+#define MTK_36BIT_DMA BIT_ULL(MTK_36BIT_DMA_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
- BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+ BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
- BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
- BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+ BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
- BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
- BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
/* Supported path present on SoCs */
-#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
-#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
-#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
-#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
+#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
@@ -934,11 +1071,14 @@ enum mkt_eth_capabilities {
#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
- MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+ MTK_RSTCTRL_PPE1 | MTK_SRAM)
#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
- MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+ MTK_RSTCTRL_PPE1 | MTK_SRAM)
+
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
+ MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1009,6 +1149,7 @@ struct mtk_reg_map {
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
* @hash_offset Flow table hash offset.
+ * @version SoC version.
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
@@ -1022,14 +1163,16 @@ struct mtk_reg_map {
struct mtk_soc_data {
const struct mtk_reg_map *reg_map;
u32 ana_rgc3;
- u32 caps;
- u32 required_clks;
+ u64 caps;
+ u64 required_clks;
bool required_pctl;
u8 offload_version;
u8 hash_offset;
+ u8 version;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;
+ bool disable_pll_modes;
struct {
u32 txd_size;
u32 rxd_size;
@@ -1042,8 +1185,8 @@ struct mtk_soc_data {
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS 2
+/* currently no SoC has more than 3 macs */
+#define MTK_MAX_DEVS 3
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
@@ -1095,6 +1238,7 @@ struct mtk_eth {
struct device *dev;
struct device *dma_dev;
void __iomem *base;
+ void *sram_base;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
@@ -1182,6 +1326,21 @@ struct mtk_mac {
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
+static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
+{
+ return eth->soc->version == 1;
+}
+
+static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 1;
+}
+
+static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 2;
+}
+
static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
@@ -1192,7 +1351,7 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
return MTK_FOE_IB1_BIND_TIMESTAMP;
@@ -1200,7 +1359,7 @@ static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_PPPOE_V2;
return MTK_FOE_IB1_BIND_PPPOE;
@@ -1208,7 +1367,7 @@ static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
return MTK_FOE_IB1_BIND_VLAN_TAG;
@@ -1216,7 +1375,7 @@ static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
return MTK_FOE_IB1_BIND_VLAN_LAYER;
@@ -1224,7 +1383,7 @@ static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@@ -1232,7 +1391,7 @@ static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
@@ -1240,7 +1399,7 @@ static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB1_PACKET_TYPE_V2;
return MTK_FOE_IB1_PACKET_TYPE;
@@ -1248,7 +1407,7 @@ static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
@@ -1256,7 +1415,7 @@ static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
{
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
return MTK_FOE_IB2_MULTICAST_V2;
return MTK_FOE_IB2_MULTICAST;
@@ -1267,6 +1426,7 @@ void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 9129821f3ab8..86f32f486043 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -92,7 +92,6 @@ static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
{
- u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
u32 val, cnt_r0, cnt_r1, cnt_r2;
int ret;
@@ -107,12 +106,20 @@ static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *p
cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
- byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
- byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
- pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
- pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
- *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
- *packets = (pkt_cnt_high << 16) | pkt_cnt_low;
+ if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
+ /* 64 bit for each counter */
+ u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
+ *bytes = ((u64)cnt_r1 << 32) | cnt_r0;
+ *packets = ((u64)cnt_r3 << 32) | cnt_r2;
+ } else {
+ /* 48 bit byte counter, 40 bit packet counter */
+ u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
+ u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
+ u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
+ u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
+ *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
+ *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
+ }
return 0;
}
@@ -208,7 +215,7 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
memset(entry, 0, sizeof(*entry));
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
@@ -272,7 +279,7 @@ int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
u32 val = *ib2;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
val &= ~MTK_FOE_IB2_DEST_PORT_V2;
val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
} else {
@@ -423,13 +430,22 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ switch (eth->soc->version) {
+ case 3:
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
+ break;
+ case 2:
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
MTK_FOE_IB2_WDMA_WINFO_V2;
l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
- } else {
+ break;
+ default:
*ib2 &= ~MTK_FOE_IB2_PORT_MG;
*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
if (wdma_idx)
@@ -437,6 +453,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ break;
}
return 0;
@@ -447,7 +464,7 @@ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
{
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_QID_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
@@ -603,7 +620,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
struct mtk_foe_entry *hwe;
u32 val;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
timestamp);
@@ -619,7 +636,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
hwe->ib1 = entry->ib1;
if (ppe->accounting) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
val = MTK_FOE_IB2_MIB_CNT_V2;
else
val = MTK_FOE_IB2_MIB_CNT;
@@ -964,8 +981,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
- val = MTK_PPE_TB_CFG_ENTRY_80B |
- MTK_PPE_TB_CFG_AGE_NON_L4 |
+ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
MTK_PPE_TB_CFG_AGE_UNBIND |
MTK_PPE_TB_CFG_AGE_TCP |
MTK_PPE_TB_CFG_AGE_UDP |
@@ -979,8 +995,10 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_TB_CFG_INFO_SEL;
+ if (!mtk_is_netsys_v3_or_greater(ppe->eth))
+ val |= MTK_PPE_TB_CFG_ENTRY_80B;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -995,7 +1013,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
MTK_PPE_MD_TOAP_BYP_CRSN1 |
MTK_PPE_MD_TOAP_BYP_CRSN2 |
@@ -1037,7 +1055,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
- if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index e51de31a52ec..e3d0ec72bc69 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -85,6 +85,17 @@ enum {
#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
+#define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
+#define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
+
+#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
+#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
+#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
+#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
+#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
+#define MTK_FOE_WINFO_PAO_HF BIT(23)
+#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
+
enum {
MTK_FOE_STATE_INVALID,
MTK_FOE_STATE_UNBIND,
@@ -106,8 +117,13 @@ struct mtk_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+ /* netsys_v2 */
u16 minfo;
u16 winfo;
+
+ /* netsys_v3 */
+ u32 w3info;
+ u32 wpao;
};
/* software-only entry type */
@@ -216,6 +232,10 @@ struct mtk_foe_ipv6_6rd {
struct mtk_foe_mac_info l2;
};
+#define MTK_FOE_ENTRY_V1_SIZE 80
+#define MTK_FOE_ENTRY_V2_SIZE 96
+#define MTK_FOE_ENTRY_V3_SIZE 128
+
struct mtk_foe_entry {
u32 ib1;
@@ -225,7 +245,7 @@ struct mtk_foe_entry {
struct mtk_foe_ipv4_dslite dslite;
struct mtk_foe_ipv6 ipv6;
struct mtk_foe_ipv6_6rd ipv6_6rd;
- u32 data[23];
+ u32 data[31];
};
};
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 02eebff02d45..a70a5417c173 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -193,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
info.bss, info.wcid);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
pse_port = 8;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index a2e61b3eb006..3ce088eef0ef 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -163,6 +163,8 @@ enum {
#define MTK_PPE_MIB_SER_R2 0x348
#define MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH GENMASK(23, 0)
+#define MTK_PPE_MIB_SER_R3 0x34c
+
#define MTK_PPE_MIB_CACHE_CTL 0x350
#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 02c03325911f..31aebeb2e285 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 3b651efcc25e..94376aa2b34c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
@@ -1099,7 +1100,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
} else {
struct mtk_eth *eth = dev->hw->eth;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ if (mtk_is_netsys_v2_or_greater(eth))
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
@@ -1915,7 +1916,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
- hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
if (hw->version == 1) {
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index b244c02c5b51..e24afeaea0da 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -84,7 +84,6 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
}
}
-
static int
wed_txinfo_show(struct seq_file *s, void *data)
{
@@ -127,16 +126,23 @@ wed_txinfo_show(struct seq_file *s, void *data)
DUMP_WDMA_RING(WDMA_RING_RX(0)),
DUMP_WDMA_RING(WDMA_RING_RX(1)),
- DUMP_STR("TX FREE"),
+ DUMP_STR("WED TX FREE"),
DUMP_WED(WED_RX_MIB(0)),
+ DUMP_WED_RING(WED_RING_RX(0)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
+ DUMP_WED(WED_RX_MIB(1)),
+ DUMP_WED_RING(WED_RING_RX(1)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
+
+ DUMP_STR("WED WPDMA TX FREE"),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
- if (!dev)
- return 0;
-
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
@@ -208,10 +214,8 @@ wed_rxinfo_show(struct seq_file *s, void *data)
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
- if (!dev)
- return 0;
-
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 0a50bb98c5ea..47ea69feb3b2 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -266,6 +266,8 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 69fba29055e9..3bd51a3d6650 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -7,10 +7,9 @@
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
-#include <linux/of_platform.h>
#include <linux/interrupt.h>
-#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1b4b1f642317..825e05fb8607 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -27,6 +27,7 @@ config MLX4_EN_DCB
config MLX4_CORE
tristate
depends on PCI
+ select AUXILIARY_BUS
select NET_DEVLINK
default n
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 0eb7b83637d8..0d8a362c2673 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -194,7 +194,7 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
mutex_unlock(&persist->device_state_mutex);
/* At that step HW was already reset, now notify clients */
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, NULL);
mlx4_cmd_wake_completions(dev);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index c56d2194cbfc..f5b1f8c7834f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2113,7 +2113,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
goto inform_slave_state;
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, &slave);
/* write the version in the event field */
reply |= mlx4_comm_get_version();
@@ -2152,7 +2152,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (mlx4_master_activate_admin_state(priv, slave))
goto reset_slave;
slave_state[slave].active = true;
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, &slave);
break;
case MLX4_COMM_CMD_VHCR_POST:
if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 7d45f1d55f79..164a13272faa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1467,8 +1467,8 @@ static int add_ip_rule(struct mlx4_en_priv *priv,
struct list_head *list_h)
{
int err;
- struct mlx4_spec_list *spec_l2 = NULL;
- struct mlx4_spec_list *spec_l3 = NULL;
+ struct mlx4_spec_list *spec_l2;
+ struct mlx4_spec_list *spec_l3;
struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
@@ -1505,9 +1505,9 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
struct list_head *list_h, int proto)
{
int err;
- struct mlx4_spec_list *spec_l2 = NULL;
- struct mlx4_spec_list *spec_l3 = NULL;
- struct mlx4_spec_list *spec_l4 = NULL;
+ struct mlx4_spec_list *spec_l2;
+ struct mlx4_spec_list *spec_l3;
+ struct mlx4_spec_list *spec_l4;
struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index f1259bdb1a29..d8f4d00ad26b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -183,24 +183,31 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
}
}
-static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
+static int mlx4_en_event(struct notifier_block *this, unsigned long event,
+ void *param)
{
- struct mlx4_en_dev *endev = ctx;
-
- return endev->pndev[port];
-}
-
-static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
- enum mlx4_dev_event event, unsigned long port)
-{
- struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
+ struct mlx4_en_dev *mdev =
+ container_of(this, struct mlx4_en_dev, mlx_nb);
+ struct mlx4_dev *dev = mdev->dev;
struct mlx4_en_priv *priv;
+ int port;
+
+ switch (event) {
+ case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+ case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
+ case MLX4_DEV_EVENT_SLAVE_INIT:
+ case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+ break;
+ default:
+ port = *(int *)param;
+ break;
+ }
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
case MLX4_DEV_EVENT_PORT_DOWN:
if (!mdev->pndev[port])
- return;
+ return NOTIFY_DONE;
priv = netdev_priv(mdev->pndev[port]);
/* To prevent races, we poll the link state in a separate
task rather than changing it here */
@@ -212,23 +219,30 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
+ case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
case MLX4_DEV_EVENT_SLAVE_INIT:
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
break;
default:
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
- return;
- mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
- (int) port);
+ return NOTIFY_DONE;
+ mlx4_warn(mdev, "Unhandled event %d for port %d\n", (int)event,
+ port);
}
+
+ return NOTIFY_DONE;
}
-static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
+static void mlx4_en_remove(struct auxiliary_device *adev)
{
- struct mlx4_en_dev *mdev = endev_ptr;
+ struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
+ struct mlx4_dev *dev = madev->mdev;
+ struct mlx4_en_dev *mdev = auxiliary_get_drvdata(adev);
int i;
+ mlx4_unregister_event_notifier(dev, &mdev->mlx_nb);
+
mutex_lock(&mdev->state_lock);
mdev->device_up = false;
mutex_unlock(&mdev->state_lock);
@@ -242,52 +256,41 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
- if (mdev->nb.notifier_call)
- unregister_netdevice_notifier(&mdev->nb);
+ if (mdev->netdev_nb.notifier_call)
+ unregister_netdevice_notifier(&mdev->netdev_nb);
kfree(mdev);
}
-static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
-{
- int i;
- struct mlx4_en_dev *mdev = ctx;
-
- /* Create a netdev for each port */
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- mlx4_info(mdev, "Activating port:%d\n", i);
- if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
- mdev->pndev[i] = NULL;
- }
-
- /* register notifier */
- mdev->nb.notifier_call = mlx4_en_netdev_event;
- if (register_netdevice_notifier(&mdev->nb)) {
- mdev->nb.notifier_call = NULL;
- mlx4_err(mdev, "Failed to create notifier\n");
- }
-}
-
-static void *mlx4_en_add(struct mlx4_dev *dev)
+static int mlx4_en_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
{
+ struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
+ struct mlx4_dev *dev = madev->mdev;
struct mlx4_en_dev *mdev;
- int i;
+ int err, i;
printk_once(KERN_INFO "%s", mlx4_en_version);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev)
+ if (!mdev) {
+ err = -ENOMEM;
goto err_free_res;
+ }
- if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
+ err = mlx4_pd_alloc(dev, &mdev->priv_pdn);
+ if (err)
goto err_free_dev;
- if (mlx4_uar_alloc(dev, &mdev->priv_uar))
+ err = mlx4_uar_alloc(dev, &mdev->priv_uar);
+ if (err)
goto err_pd;
mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
PAGE_SIZE);
- if (!mdev->uar_map)
+ if (!mdev->uar_map) {
+ err = -ENOMEM;
goto err_uar;
+ }
spin_lock_init(&mdev->uar_lock);
mdev->dev = dev;
@@ -299,13 +302,15 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (!mdev->LSO_support)
mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
- if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
- MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
- 0, 0, &mdev->mr)) {
+ err = mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+ MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, 0, 0,
+ &mdev->mr);
+ if (err) {
mlx4_err(mdev, "Failed allocating memory region\n");
goto err_map;
}
- if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
+ err = mlx4_mr_enable(mdev->dev, &mdev->mr);
+ if (err) {
mlx4_err(mdev, "Failed enabling memory region\n");
goto err_mr;
}
@@ -325,15 +330,39 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
* Note: we cannot use the shared workqueue because of deadlocks caused
* by the rtnl lock */
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
- if (!mdev->workqueue)
+ if (!mdev->workqueue) {
+ err = -ENOMEM;
goto err_mr;
+ }
/* At this stage all non-port specific tasks are complete:
* mark the card state as up */
mutex_init(&mdev->state_lock);
mdev->device_up = true;
- return mdev;
+ /* register mlx4 core notifier */
+ mdev->mlx_nb.notifier_call = mlx4_en_event;
+ err = mlx4_register_event_notifier(dev, &mdev->mlx_nb);
+ WARN(err, "failed to register mlx4 event notifier (%d)", err);
+
+ /* Setup ports */
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+ mdev->pndev[i] = NULL;
+ }
+
+ /* register netdev notifier */
+ mdev->netdev_nb.notifier_call = mlx4_en_netdev_event;
+ if (register_netdevice_notifier(&mdev->netdev_nb)) {
+ mdev->netdev_nb.notifier_call = NULL;
+ mlx4_err(mdev, "Failed to create netdev notifier\n");
+ }
+
+ auxiliary_set_drvdata(adev, mdev);
+ return 0;
err_mr:
(void) mlx4_mr_free(dev, &mdev->mr);
@@ -347,16 +376,24 @@ err_pd:
err_free_dev:
kfree(mdev);
err_free_res:
- return NULL;
+ return err;
}
-static struct mlx4_interface mlx4_en_interface = {
- .add = mlx4_en_add,
- .remove = mlx4_en_remove,
- .event = mlx4_en_event,
- .get_dev = mlx4_en_get_netdev,
+static const struct auxiliary_device_id mlx4_en_id_table[] = {
+ { .name = MLX4_ADEV_NAME ".eth" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx4_en_id_table);
+
+static struct mlx4_adrv mlx4_en_adrv = {
+ .adrv = {
+ .name = "eth",
+ .probe = mlx4_en_probe,
+ .remove = mlx4_en_remove,
+ .id_table = mlx4_en_id_table,
+ },
.protocol = MLX4_PROT_ETH,
- .activate = mlx4_en_activate,
};
static void mlx4_en_verify_params(void)
@@ -385,12 +422,12 @@ static int __init mlx4_en_init(void)
mlx4_en_verify_params();
mlx4_en_init_ptys2ethtool_map();
- return mlx4_register_interface(&mlx4_en_interface);
+ return mlx4_register_auxiliary_driver(&mlx4_en_adrv);
}
static void __exit mlx4_en_cleanup(void)
{
- mlx4_unregister_interface(&mlx4_en_interface);
+ mlx4_unregister_auxiliary_driver(&mlx4_en_adrv);
}
module_init(mlx4_en_init);
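mlx4_en now attaches to the core over the auxiliary bus rather than the removed mlx4_interface callback table. For reference, a stripped-down sketch of the generic auxiliary-driver pattern; the example_* names and the "vendor.eth" match string are illustrative, not the mlx4 wrappers used above:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int example_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	/* allocate per-device state, then stash it for remove() */
	auxiliary_set_drvdata(adev, NULL);
	return 0;
}

static void example_remove(struct auxiliary_device *adev)
{
	void *state = auxiliary_get_drvdata(adev);

	/* tear down whatever probe() set up */
	(void)state;
}

static const struct auxiliary_device_id example_id_table[] = {
	{ .name = "vendor.eth" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, example_id_table);

static struct auxiliary_driver example_adrv = {
	.name = "eth",
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_id_table,
};
module_auxiliary_driver(example_adrv);

MODULE_LICENSE("GPL");

The bus matches devices to drivers on the "<parent module>.<name>" string in the id table, which is why the mlx4 table above uses MLX4_ADEV_NAME ".eth".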
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e11bc0ac880e..33bbcced8105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -291,7 +291,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
__be32 dst_ip, u8 ip_proto, __be16 src_port,
__be16 dst_port, u32 flow_id)
{
- struct mlx4_en_filter *filter = NULL;
+ struct mlx4_en_filter *filter;
filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
if (!filter)
@@ -2894,63 +2894,6 @@ static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = {
.xmo_rx_hash = mlx4_en_xdp_rx_hash,
};
-struct mlx4_en_bond {
- struct work_struct work;
- struct mlx4_en_priv *priv;
- int is_bonded;
- struct mlx4_port_map port_map;
-};
-
-static void mlx4_en_bond_work(struct work_struct *work)
-{
- struct mlx4_en_bond *bond = container_of(work,
- struct mlx4_en_bond,
- work);
- int err = 0;
- struct mlx4_dev *dev = bond->priv->mdev->dev;
-
- if (bond->is_bonded) {
- if (!mlx4_is_bonded(dev)) {
- err = mlx4_bond(dev);
- if (err)
- en_err(bond->priv, "Fail to bond device\n");
- }
- if (!err) {
- err = mlx4_port_map_set(dev, &bond->port_map);
- if (err)
- en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
- bond->port_map.port1,
- bond->port_map.port2,
- err);
- }
- } else if (mlx4_is_bonded(dev)) {
- err = mlx4_unbond(dev);
- if (err)
- en_err(bond->priv, "Fail to unbond device\n");
- }
- dev_put(bond->priv->dev);
- kfree(bond);
-}
-
-static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
- u8 v2p_p1, u8 v2p_p2)
-{
- struct mlx4_en_bond *bond = NULL;
-
- bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
- if (!bond)
- return -ENOMEM;
-
- INIT_WORK(&bond->work, mlx4_en_bond_work);
- bond->priv = priv;
- bond->is_bonded = is_bonded;
- bond->port_map.port1 = v2p_p1;
- bond->port_map.port2 = v2p_p2;
- dev_hold(priv->dev);
- queue_work(priv->mdev->workqueue, &bond->work);
- return 0;
-}
-
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -2960,14 +2903,13 @@ int mlx4_en_netdev_event(struct notifier_block *this,
struct mlx4_dev *dev;
int i, num_eth_ports = 0;
bool do_bond = true;
- struct mlx4_en_priv *priv;
u8 v2p_port1 = 0;
u8 v2p_port2 = 0;
if (!net_eq(dev_net(ndev), &init_net))
return NOTIFY_DONE;
- mdev = container_of(this, struct mlx4_en_dev, nb);
+ mdev = container_of(this, struct mlx4_en_dev, netdev_nb);
dev = mdev->dev;
/* Go into this mode only when two network devices set on two ports
@@ -2995,7 +2937,6 @@ int mlx4_en_netdev_event(struct notifier_block *this,
if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
return NOTIFY_DONE;
- priv = netdev_priv(ndev);
if (do_bond) {
struct netdev_notifier_bonding_info *notifier_info = ptr;
struct netdev_bonding_info *bonding_info =
@@ -3062,8 +3003,7 @@ int mlx4_en_netdev_event(struct notifier_block *this,
}
}
- mlx4_en_queue_bond_work(priv, do_bond,
- v2p_port1, v2p_port2);
+ mlx4_queue_bond_work(dev, do_bond, v2p_port1, v2p_port2);
return NOTIFY_DONE;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 414e390e6b48..6598b10a9ff4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -501,7 +501,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
int port;
int slave = 0;
int ret;
- u32 flr_slave;
+ int flr_slave;
u8 update_slave_state;
int i;
enum slave_port_gen_event gen_event;
@@ -606,8 +606,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
slaves_port = mlx4_phys_to_slaves_pport(dev, port);
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
- port);
+ mlx4_dispatch_event(
+ dev, MLX4_DEV_EVENT_PORT_DOWN, &port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
if (!mlx4_is_master(dev))
break;
@@ -647,7 +647,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
}
}
} else {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+ &port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
@@ -758,7 +759,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
}
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
- flr_slave);
+ &flr_slave);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_flr_event_work);
break;
@@ -787,8 +788,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
- (unsigned long) eqe);
+ mlx4_dispatch_event(
+ dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe);
break;
case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 65482f004e50..a371b970ac1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -38,102 +38,131 @@
#include "mlx4.h"
-struct mlx4_device_context {
- struct list_head list;
- struct list_head bond_list;
- struct mlx4_interface *intf;
- void *context;
-};
-
-static LIST_HEAD(intf_list);
-static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
+static DEFINE_IDA(mlx4_adev_ida);
-static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+static bool is_eth_supported(struct mlx4_dev *dev)
{
- struct mlx4_device_context *dev_ctx;
+ for (int port = 1; port <= dev->caps.num_ports; port++)
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ return true;
- dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx)
- return;
+ return false;
+}
- dev_ctx->intf = intf;
- dev_ctx->context = intf->add(&priv->dev);
+static bool is_ib_supported(struct mlx4_dev *dev)
+{
+ for (int port = 1; port <= dev->caps.num_ports; port++)
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+ return true;
- if (dev_ctx->context) {
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
- if (intf->activate)
- intf->activate(&priv->dev, dev_ctx->context);
- } else
- kfree(dev_ctx);
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
+ return true;
+ return false;
}
-static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+static const struct mlx4_adev_device {
+ const char *suffix;
+ bool (*is_supported)(struct mlx4_dev *dev);
+} mlx4_adev_devices[] = {
+ { "eth", is_eth_supported },
+ { "ib", is_ib_supported },
+};
+
+int mlx4_adev_init(struct mlx4_dev *dev)
{
- struct mlx4_device_context *dev_ctx;
+ struct mlx4_priv *priv = mlx4_priv(dev);
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf) {
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
+ priv->adev_idx = ida_alloc(&mlx4_adev_ida, GFP_KERNEL);
+ if (priv->adev_idx < 0)
+ return priv->adev_idx;
- intf->remove(&priv->dev, dev_ctx->context);
- kfree(dev_ctx);
- return;
- }
+ priv->adev = kcalloc(ARRAY_SIZE(mlx4_adev_devices),
+ sizeof(struct mlx4_adev *), GFP_KERNEL);
+ if (!priv->adev) {
+ ida_free(&mlx4_adev_ida, priv->adev_idx);
+ return -ENOMEM;
+ }
+
+ return 0;
}
-int mlx4_register_interface(struct mlx4_interface *intf)
+void mlx4_adev_cleanup(struct mlx4_dev *dev)
{
- struct mlx4_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
-
- mutex_lock(&intf_mutex);
+ struct mlx4_priv *priv = mlx4_priv(dev);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list) {
- if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
- mlx4_dbg(&priv->dev,
- "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
- intf->flags &= ~MLX4_INTFF_BONDING;
- }
- mlx4_add_device(intf, priv);
- }
+ kfree(priv->adev);
+ ida_free(&mlx4_adev_ida, priv->adev_idx);
+}
- mutex_unlock(&intf_mutex);
+static void adev_release(struct device *dev)
+{
+ struct mlx4_adev *mlx4_adev =
+ container_of(dev, struct mlx4_adev, adev.dev);
+ struct mlx4_priv *priv = mlx4_priv(mlx4_adev->mdev);
+ int idx = mlx4_adev->idx;
- return 0;
+ kfree(mlx4_adev);
+ priv->adev[idx] = NULL;
}
-EXPORT_SYMBOL_GPL(mlx4_register_interface);
-void mlx4_unregister_interface(struct mlx4_interface *intf)
+static struct mlx4_adev *add_adev(struct mlx4_dev *dev, int idx)
{
- struct mlx4_priv *priv;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ const char *suffix = mlx4_adev_devices[idx].suffix;
+ struct auxiliary_device *adev;
+ struct mlx4_adev *madev;
+ int ret;
- mutex_lock(&intf_mutex);
+ madev = kzalloc(sizeof(*madev), GFP_KERNEL);
+ if (!madev)
+ return ERR_PTR(-ENOMEM);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx4_remove_device(intf, priv);
+ adev = &madev->adev;
+ adev->id = priv->adev_idx;
+ adev->name = suffix;
+ adev->dev.parent = &dev->persist->pdev->dev;
+ adev->dev.release = adev_release;
+ madev->mdev = dev;
+ madev->idx = idx;
- list_del(&intf->list);
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(madev);
+ return ERR_PTR(ret);
+ }
- mutex_unlock(&intf_mutex);
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ERR_PTR(ret);
+ }
+ return madev;
+}
+
+static void del_adev(struct auxiliary_device *adev)
+{
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
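The device side mirrors this: add_adev()/del_adev() above follow the standard auxiliary-device lifecycle, in which the embedding struct is only freed from the release() callback once the last reference is dropped. A condensed sketch of that lifecycle; example_* names are illustrative:

#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_adev {
	struct auxiliary_device adev;
};

static void example_adev_release(struct device *dev)
{
	struct example_adev *e = container_of(dev, struct example_adev, adev.dev);

	kfree(e);	/* runs once the last reference on adev.dev is gone */
}

static struct auxiliary_device *example_add_adev(struct device *parent, int id)
{
	struct example_adev *e = kzalloc(sizeof(*e), GFP_KERNEL);
	int ret;

	if (!e)
		return ERR_PTR(-ENOMEM);
	e->adev.name = "eth";
	e->adev.id = id;
	e->adev.dev.parent = parent;
	e->adev.dev.release = example_adev_release;

	ret = auxiliary_device_init(&e->adev);
	if (ret) {
		kfree(e);	/* init failed before any reference was taken */
		return ERR_PTR(ret);
	}
	ret = auxiliary_device_add(&e->adev);
	if (ret) {
		/* drops the reference taken by init; release() frees */
		auxiliary_device_uninit(&e->adev);
		return ERR_PTR(ret);
	}
	return &e->adev;
}

static void example_del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}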
+int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv)
+{
+ return auxiliary_driver_register(&madrv->adrv);
+}
+EXPORT_SYMBOL_GPL(mlx4_register_auxiliary_driver);
+
+void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv)
+{
+ auxiliary_driver_unregister(&madrv->adrv);
}
-EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
+EXPORT_SYMBOL_GPL(mlx4_unregister_auxiliary_driver);
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
- unsigned long flags;
- int ret;
- LIST_HEAD(bond_list);
+ int i, ret;
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
return -EOPNOTSUPP;
@@ -155,69 +184,178 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
dev->flags &= ~MLX4_FLAG_BONDED;
}
- spin_lock_irqsave(&priv->ctx_lock, flags);
- list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
- if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
- list_add_tail(&dev_ctx->bond_list, &bond_list);
- list_del(&dev_ctx->list);
+ mutex_lock(&intf_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) {
+ struct mlx4_adev *madev = priv->adev[i];
+ struct mlx4_adrv *madrv;
+ enum mlx4_protocol protocol;
+
+ if (!madev)
+ continue;
+
+ device_lock(&madev->adev.dev);
+ if (!madev->adev.dev.driver) {
+ device_unlock(&madev->adev.dev);
+ continue;
+ }
+
+ madrv = container_of(madev->adev.dev.driver, struct mlx4_adrv,
+ adrv.driver);
+ if (!(madrv->flags & MLX4_INTFF_BONDING)) {
+ device_unlock(&madev->adev.dev);
+ continue;
+ }
+
+ if (mlx4_is_mfunc(dev)) {
+ mlx4_dbg(dev,
+ "SRIOV, disabled HA mode for intf proto %d\n",
+ madrv->protocol);
+ device_unlock(&madev->adev.dev);
+ continue;
}
- }
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
- list_for_each_entry(dev_ctx, &bond_list, bond_list) {
- dev_ctx->intf->remove(dev, dev_ctx->context);
- dev_ctx->context = dev_ctx->intf->add(dev);
+ protocol = madrv->protocol;
+ device_unlock(&madev->adev.dev);
- spin_lock_irqsave(&priv->ctx_lock, flags);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
+ del_adev(&madev->adev);
+ priv->adev[i] = add_adev(dev, i);
+ if (IS_ERR(priv->adev[i])) {
+ mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i,
+ mlx4_adev_devices[i].suffix);
+ priv->adev[i] = NULL;
+ continue;
+ }
- mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
- dev_ctx->intf->protocol, enable ?
- "enabled" : "disabled");
+ mlx4_dbg(dev,
+ "Interface for protocol %d restarted with bonded mode %s\n",
+ protocol, enable ? "enabled" : "disabled");
}
+
+ mutex_unlock(&intf_mutex);
+
return 0;
}
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
- unsigned long param)
+ void *param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ atomic_notifier_call_chain(&priv->event_nh, type, param);
+}
+
+int mlx4_register_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_device_context *dev_ctx;
- unsigned long flags;
- spin_lock_irqsave(&priv->ctx_lock, flags);
+ return atomic_notifier_chain_register(&priv->event_nh, nb);
+}
+EXPORT_SYMBOL(mlx4_register_event_notifier);
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event)
- dev_ctx->intf->event(dev, dev_ctx->context, type, param);
+int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
+ return atomic_notifier_chain_unregister(&priv->event_nh, nb);
}
+EXPORT_SYMBOL(mlx4_unregister_event_notifier);
-int mlx4_register_device(struct mlx4_dev *dev)
+static int add_drivers(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) {
+ bool is_supported = false;
+
+ if (priv->adev[i])
+ continue;
+
+ if (mlx4_adev_devices[i].is_supported)
+ is_supported = mlx4_adev_devices[i].is_supported(dev);
+
+ if (!is_supported)
+ continue;
+
+ priv->adev[i] = add_adev(dev, i);
+ if (IS_ERR(priv->adev[i])) {
+ mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i,
+ mlx4_adev_devices[i].suffix);
+			/* We continue to rescan drivers and leave it to the
+			 * caller to decide whether to release everything or
+			 * continue. */
+ ret = PTR_ERR(priv->adev[i]);
+ priv->adev[i] = NULL;
+ }
+ }
+ return ret;
+}
+
+static void delete_drivers(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_interface *intf;
+ bool delete_all;
+ int i;
+
+ delete_all = !(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP);
+
+ for (i = ARRAY_SIZE(mlx4_adev_devices) - 1; i >= 0; i--) {
+ bool is_supported = false;
+
+ if (!priv->adev[i])
+ continue;
+
+ if (mlx4_adev_devices[i].is_supported && !delete_all)
+ is_supported = mlx4_adev_devices[i].is_supported(dev);
+
+ if (is_supported)
+ continue;
+
+ del_adev(&priv->adev[i]->adev);
+ priv->adev[i] = NULL;
+ }
+}
+
+/* This function is used after mlx4_dev is reconfigured.
+ */
+static int rescan_drivers_locked(struct mlx4_dev *dev)
+{
+ lockdep_assert_held(&intf_mutex);
+
+ delete_drivers(dev);
+ if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
+ return 0;
+
+ return add_drivers(dev);
+}
+
+int mlx4_register_device(struct mlx4_dev *dev)
+{
+ int ret;
mutex_lock(&intf_mutex);
dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
- list_add_tail(&priv->dev_list, &dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx4_add_device(intf, priv);
+
+ ret = rescan_drivers_locked(dev);
mutex_unlock(&intf_mutex);
+
+ if (ret) {
+ mlx4_unregister_device(dev);
+ return ret;
+ }
+
mlx4_start_catas_poll(dev);
- return 0;
+ return ret;
}
void mlx4_unregister_device(struct mlx4_dev *dev)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_interface *intf;
-
if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
return;
@@ -236,35 +374,12 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
}
mutex_lock(&intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx4_remove_device(intf, priv);
-
- list_del(&priv->dev_list);
dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;
- mutex_unlock(&intf_mutex);
-}
-
-void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- struct mlx4_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
+ rescan_drivers_locked(dev);
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
- break;
- }
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
+ mutex_unlock(&intf_mutex);
}
-EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
{
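With the ctx_list walk removed, events now travel over a per-device atomic notifier chain that consumers register against explicitly; the chain is safe to invoke from atomic context, which the old spinlock-protected callback list also allowed but with far more bookkeeping. A minimal sketch of the stock notifier API used here; example_* names are illustrative:

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_nh);

static int example_cb(struct notifier_block *nb, unsigned long event, void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_cb,
};

static void example_use(void)
{
	int port = 1;

	atomic_notifier_chain_register(&example_nh, &example_nb);
	/* callable from atomic context; the payload travels as a void pointer */
	atomic_notifier_call_chain(&example_nh, 0 /* event id */, &port);
	atomic_notifier_chain_unregister(&example_nh, &example_nb);
}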
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 61286b0d9b0c..2581226836b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,7 +42,6 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
-#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <net/devlink.h>
@@ -864,7 +863,7 @@ static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
{
- struct mlx4_func_cap *func_cap = NULL;
+ struct mlx4_func_cap *func_cap;
struct mlx4_caps *caps = &dev->caps;
int i, err = 0;
@@ -908,9 +907,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
{
int err;
u32 page_size;
- struct mlx4_dev_cap *dev_cap = NULL;
- struct mlx4_func_cap *func_cap = NULL;
- struct mlx4_init_hca_param *hca_param = NULL;
+ struct mlx4_dev_cap *dev_cap;
+ struct mlx4_func_cap *func_cap;
+ struct mlx4_init_hca_param *hca_param;
hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL);
func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
@@ -1091,27 +1090,6 @@ free_mem:
return err;
}
-static void mlx4_request_modules(struct mlx4_dev *dev)
-{
- int port;
- int has_ib_port = false;
- int has_eth_port = false;
-#define EN_DRV_NAME "mlx4_en"
-#define IB_DRV_NAME "mlx4_ib"
-
- for (port = 1; port <= dev->caps.num_ports; port++) {
- if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
- has_ib_port = true;
- else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- has_eth_port = true;
- }
-
- if (has_eth_port)
- request_module_nowait(EN_DRV_NAME);
- if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
- request_module_nowait(IB_DRV_NAME);
-}
-
/*
* Change the port configuration of the device.
* Every user of this function must hold the port mutex.
@@ -1147,7 +1125,6 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
mlx4_err(dev, "Failed to register device\n");
goto out;
}
- mlx4_request_modules(dev);
}
out:
@@ -1441,7 +1418,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev)
return ret;
}
-int mlx4_bond(struct mlx4_dev *dev)
+static int mlx4_bond(struct mlx4_dev *dev)
{
int ret = 0;
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1467,9 +1444,8 @@ int mlx4_bond(struct mlx4_dev *dev)
return ret;
}
-EXPORT_SYMBOL_GPL(mlx4_bond);
-int mlx4_unbond(struct mlx4_dev *dev)
+static int mlx4_unbond(struct mlx4_dev *dev)
{
int ret = 0;
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1496,10 +1472,8 @@ int mlx4_unbond(struct mlx4_dev *dev)
return ret;
}
-EXPORT_SYMBOL_GPL(mlx4_unbond);
-
-int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
+static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
u8 port1 = v2p->port1;
u8 port2 = v2p->port2;
@@ -1541,7 +1515,61 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
mutex_unlock(&priv->bond_mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_port_map_set);
+
+struct mlx4_bond {
+ struct work_struct work;
+ struct mlx4_dev *dev;
+ int is_bonded;
+ struct mlx4_port_map port_map;
+};
+
+static void mlx4_bond_work(struct work_struct *work)
+{
+ struct mlx4_bond *bond = container_of(work, struct mlx4_bond, work);
+ int err = 0;
+
+ if (bond->is_bonded) {
+ if (!mlx4_is_bonded(bond->dev)) {
+ err = mlx4_bond(bond->dev);
+ if (err)
+ mlx4_err(bond->dev, "Fail to bond device\n");
+ }
+ if (!err) {
+ err = mlx4_port_map_set(bond->dev, &bond->port_map);
+ if (err)
+ mlx4_err(bond->dev,
+ "Fail to set port map [%d][%d]: %d\n",
+ bond->port_map.port1,
+ bond->port_map.port2, err);
+ }
+ } else if (mlx4_is_bonded(bond->dev)) {
+ err = mlx4_unbond(bond->dev);
+ if (err)
+ mlx4_err(bond->dev, "Fail to unbond device\n");
+ }
+ put_device(&bond->dev->persist->pdev->dev);
+ kfree(bond);
+}
+
+int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
+ u8 v2p_p2)
+{
+ struct mlx4_bond *bond;
+
+ bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
+ if (!bond)
+ return -ENOMEM;
+
+ INIT_WORK(&bond->work, mlx4_bond_work);
+ get_device(&dev->persist->pdev->dev);
+ bond->dev = dev;
+ bond->is_bonded = is_bonded;
+ bond->port_map.port1 = v2p_p1;
+ bond->port_map.port2 = v2p_p2;
+ queue_work(mlx4_wq, &bond->work);
+ return 0;
+}
+EXPORT_SYMBOL(mlx4_queue_bond_work);
static int mlx4_load_fw(struct mlx4_dev *dev)
{
@@ -3375,8 +3403,11 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
devl_assert_locked(devlink);
dev = &priv->dev;
- INIT_LIST_HEAD(&priv->ctx_list);
- spin_lock_init(&priv->ctx_lock);
+ err = mlx4_adev_init(dev);
+ if (err)
+ return err;
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&priv->event_nh);
mutex_init(&priv->port_mutex);
mutex_init(&priv->bond_mutex);
@@ -3402,10 +3433,11 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
err = mlx4_get_ownership(dev);
if (err) {
if (err < 0)
- return err;
+ goto err_adev;
else {
mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_adev;
}
}
@@ -3674,8 +3706,6 @@ slave_start:
if (err)
goto err_port;
- mlx4_request_modules(dev);
-
mlx4_sense_init(dev);
mlx4_start_sense(dev);
@@ -3753,6 +3783,9 @@ err_sriov:
mlx4_free_ownership(dev);
kfree(dev_cap);
+
+err_adev:
+ mlx4_adev_cleanup(dev);
return err;
}
@@ -4133,6 +4166,8 @@ static void mlx4_unload_one(struct pci_dev *pdev)
mlx4_slave_destroy_special_qp_cap(dev);
kfree(dev->dev_vfs);
+ mlx4_adev_cleanup(dev);
+
mlx4_clean_dev(dev);
priv->pci_dev_data = pci_dev_data;
priv->removed = 1;
@@ -4520,6 +4555,9 @@ static int __init mlx4_init(void)
{
int ret;
+ WARN_ONCE(strcmp(MLX4_ADEV_NAME, KBUILD_MODNAME),
+ "mlx4_core name not in sync with kernel module name");
+
if (mlx4_verify_params())
return -EINVAL;
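mlx4_queue_bond_work() above takes a reference on the PCI device with get_device() before deferring to a workqueue and releases it when the work item runs, so the device cannot disappear underneath the delayed reconfiguration. A generic sketch of that pattern; example_* names are illustrative and system_wq stands in for the driver's private mlx4_wq:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_bond {
	struct work_struct work;
	struct device *dev;
};

static void example_bond_work(struct work_struct *work)
{
	struct example_bond *bond = container_of(work, struct example_bond, work);

	/* ... reconfigure hardware ... */
	put_device(bond->dev);	/* drop the reference taken at queue time */
	kfree(bond);
}

static int example_queue_bond_work(struct device *dev)
{
	struct example_bond *bond = kzalloc(sizeof(*bond), GFP_ATOMIC);

	if (!bond)
		return -ENOMEM;
	INIT_WORK(&bond->work, example_bond_work);
	get_device(dev);	/* keep the device alive until the work runs */
	bond->dev = dev;
	queue_work(system_wq, &bond->work);
	return 0;
}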
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f1716a83a4d3..24d0c7c46878 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -294,7 +294,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *dqp, *tmp_dqp;
if (port < 1 || port > dev->caps.num_ports)
- return NULL;
+ return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
@@ -375,7 +375,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
bool ret = false;
if (port < 1 || port > dev->caps.num_ports)
- return NULL;
+ return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6ccf340660d9..d7d856d1758a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -47,6 +47,8 @@
#include <linux/spinlock.h>
#include <net/devlink.h>
#include <linux/rwsem.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/notifier.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -862,6 +864,11 @@ struct mlx4_steer {
struct list_head steer_entries[MLX4_NUM_STEERS];
};
+struct mlx4_port_map {
+ u8 port1;
+ u8 port2;
+};
+
enum {
MLX4_PCI_DEV_IS_VF = 1 << 0,
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
@@ -875,9 +882,9 @@ enum {
struct mlx4_priv {
struct mlx4_dev dev;
- struct list_head dev_list;
- struct list_head ctx_list;
- spinlock_t ctx_lock;
+ struct mlx4_adev **adev;
+ int adev_idx;
+ struct atomic_notifier_head event_nh;
int pci_dev_data;
int removed;
@@ -1045,10 +1052,13 @@ void mlx4_catas_end(struct mlx4_dev *dev);
int mlx4_crdump_init(struct mlx4_dev *dev);
void mlx4_crdump_end(struct mlx4_dev *dev);
int mlx4_restart_one(struct pci_dev *pdev);
+
+int mlx4_adev_init(struct mlx4_dev *dev);
+void mlx4_adev_cleanup(struct mlx4_dev *dev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
- unsigned long param);
+ void *param);
struct mlx4_dev_cap;
struct mlx4_init_hca_param;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 321f801c1d7c..efe3f97b874f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -49,6 +49,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/irq.h>
#include <net/xdp.h>
+#include <linux/notifier.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -432,7 +433,8 @@ struct mlx4_en_dev {
unsigned long last_overflow_check;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
- struct notifier_block nb;
+ struct notifier_block netdev_nb;
+ struct notifier_block mlx_nb;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index bb1d7b039a7e..c4f4de82e29e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -12,6 +12,7 @@ config MLX5_CORE
depends on MLXFW || !MLXFW
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
+ depends on HWMON || !HWMON
help
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
@@ -139,7 +140,7 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
-config MLX5_EN_MACSEC
+config MLX5_MACSEC
bool "Connect-X support for MACSec offload"
depends on MLX5_CORE_EN
depends on MACSEC
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 35f00700a4d6..7e94caca4888 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -69,16 +69,20 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
+ifneq ($(CONFIG_MLX5_EN_IPSEC),)
+ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/ipsec_fs.o
+endif
+
mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \
en/rep/bridge.o
-mlx5_core-$(CONFIG_THERMAL) += thermal.o
+mlx5_core-$(CONFIG_HWMON) += hwmon.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
@@ -94,7 +98,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
-mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
+mlx5_core-$(CONFIG_MLX5_MACSEC) += en_accel/macsec.o lib/macsec_fs.o \
en_accel/macsec_stats.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d532883b42d7..afb348579577 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -162,18 +162,18 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
- ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
- if (ret < cmd->max_reg_cmds)
- clear_bit(ret, &cmd->bitmask);
+ ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
+ if (ret < cmd->vars.max_reg_cmds)
+ clear_bit(ret, &cmd->vars.bitmask);
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
- return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
+ return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
}
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
lockdep_assert_held(&cmd->alloc_lock);
- set_bit(idx, &cmd->bitmask);
+ set_bit(idx, &cmd->vars.bitmask);
}
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -192,7 +192,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
if (ent->idx >= 0) {
cmd_free_index(cmd, ent->idx);
- up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
+ up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
}
cmd_free_ent(ent);
@@ -202,7 +202,7 @@ out:
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
- return cmd->cmd_buf + (idx << cmd->log_stride);
+ return cmd->cmd_buf + (idx << cmd->vars.log_stride);
}
static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
@@ -974,7 +974,7 @@ static void cmd_work_handler(struct work_struct *work)
cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
complete(&ent->handling);
- sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+ sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
down(sem);
if (!ent->page_queue) {
alloc_ret = cmd_alloc_index(cmd);
@@ -994,9 +994,9 @@ static void cmd_work_handler(struct work_struct *work)
}
ent->idx = alloc_ret;
} else {
- ent->idx = cmd->max_reg_cmds;
+ ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
- clear_bit(ent->idx, &cmd->bitmask);
+ clear_bit(ent->idx, &cmd->vars.bitmask);
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
@@ -1225,8 +1225,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
ds = ent->ts2 - ent->ts1;
- if (ent->op < MLX5_CMD_OP_MAX) {
- stats = &cmd->stats[ent->op];
+ stats = xa_load(&cmd->stats, ent->op);
+ if (stats) {
spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
@@ -1548,7 +1548,6 @@ static void clean_debug_files(struct mlx5_core_dev *dev)
if (!mlx5_debugfs_root)
return;
- mlx5_cmdif_debugfs_cleanup(dev);
debugfs_remove_recursive(dbg->dbg_root);
}
@@ -1563,8 +1562,6 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
-
- mlx5_cmdif_debugfs_init(dev);
}
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
@@ -1572,15 +1569,15 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
- down(&cmd->pages_sem);
+ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
+ down(&cmd->vars.sem);
+ down(&cmd->vars.pages_sem);
cmd->allowed_opcode = opcode;
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+ up(&cmd->vars.pages_sem);
+ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
+ up(&cmd->vars.sem);
}
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
@@ -1588,15 +1585,15 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
- down(&cmd->pages_sem);
+ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
+ down(&cmd->vars.sem);
+ down(&cmd->vars.pages_sem);
cmd->mode = mode;
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+ up(&cmd->vars.pages_sem);
+ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
+ up(&cmd->vars.sem);
}
static int cmd_comp_notifier(struct notifier_block *nb,
@@ -1655,7 +1652,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
/* there can be at most 32 command queues */
vector = vec & 0xffffffff;
- for (i = 0; i < (1 << cmd->log_sz); i++) {
+ for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
if (test_bit(i, &vector)) {
ent = cmd->ent_arr[i];
@@ -1698,8 +1695,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
if (ent->callback) {
ds = ent->ts2 - ent->ts1;
- if (ent->op < MLX5_CMD_OP_MAX) {
- stats = &cmd->stats[ent->op];
+ stats = xa_load(&cmd->stats, ent->op);
+ if (stats) {
spin_lock_irqsave(&stats->lock, flags);
stats->sum += ds;
++stats->n;
@@ -1744,7 +1741,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
/* wait for pending handlers to complete */
mlx5_eq_synchronize_cmd_irq(dev);
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
- vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+ vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
if (!vector)
goto no_trig;
@@ -1753,14 +1750,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
* to guarantee pending commands will not get freed in the meanwhile.
* For that reason, it also has to be done inside the alloc_lock.
*/
- for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
+ for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
cmd_ent_get(cmd->ent_arr[i]);
vector |= MLX5_TRIGGERED_CMD_COMP;
spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
mlx5_cmd_comp_handler(dev, vector, true);
- for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
+ for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
cmd_ent_put(cmd->ent_arr[i]);
return;
@@ -1773,22 +1770,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++) {
- while (down_trylock(&cmd->sem)) {
+ for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
+ while (down_trylock(&cmd->vars.sem)) {
mlx5_cmd_trigger_completions(dev);
cond_resched();
}
}
- while (down_trylock(&cmd->pages_sem)) {
+ while (down_trylock(&cmd->vars.pages_sem)) {
mlx5_cmd_trigger_completions(dev);
cond_resched();
}
/* Unlock cmdif */
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+ up(&cmd->vars.pages_sem);
+ for (i = 0; i < cmd->vars.max_reg_cmds; i++)
+ up(&cmd->vars.sem);
}
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
@@ -1858,7 +1855,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
/* atomic context may not sleep */
if (callback)
return -EINVAL;
- down(&dev->cmd.throttle_sem);
+ down(&dev->cmd.vars.throttle_sem);
}
pages_queue = is_manage_pages(in);
@@ -1903,7 +1900,7 @@ out_in:
free_msg(dev, inb);
out_up:
if (throttle_op)
- up(&dev->cmd.throttle_sem);
+ up(&dev->cmd.vars.throttle_sem);
return err;
}
@@ -1926,7 +1923,9 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
if (!err || !(strcmp(namep, "unknown command opcode")))
return;
- stats = &dev->cmd.stats[opcode];
+ stats = xa_load(&dev->cmd.stats, opcode);
+ if (!stats)
+ return;
spin_lock_irq(&stats->lock);
stats->failed++;
if (err < 0)
@@ -2190,19 +2189,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
int size = sizeof(struct mlx5_cmd_prot_block);
int align = roundup_pow_of_two(size);
struct mlx5_cmd *cmd = &dev->cmd;
- u32 cmd_h, cmd_l;
- u16 cmd_if_rev;
+ u32 cmd_l;
int err;
- int i;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd_if_rev = cmdif_rev(dev);
- if (cmd_if_rev != CMD_IF_REV) {
- mlx5_core_err(dev,
- "Driver cmdif rev(%d) differs from firmware's(%d)\n",
- CMD_IF_REV, cmd_if_rev);
- return -EINVAL;
- }
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
if (!cmd->pool)
@@ -2212,62 +2200,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (err)
goto err_free_pool;
- cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
- cmd->log_sz = cmd_l >> 4 & 0xf;
- cmd->log_stride = cmd_l & 0xf;
- if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
- mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
- 1 << cmd->log_sz);
- err = -EINVAL;
- goto err_free_page;
- }
-
- if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
- mlx5_core_err(dev, "command queue size overflow\n");
- err = -EINVAL;
- goto err_free_page;
- }
-
- cmd->state = MLX5_CMDIF_STATE_DOWN;
- cmd->checksum_disabled = 1;
- cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
- cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
-
- cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
- if (cmd->cmdif_rev > CMD_IF_REV) {
- mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
- CMD_IF_REV, cmd->cmdif_rev);
- err = -EOPNOTSUPP;
- goto err_free_page;
- }
-
- spin_lock_init(&cmd->alloc_lock);
- spin_lock_init(&cmd->token_lock);
- for (i = 0; i < MLX5_CMD_OP_MAX; i++)
- spin_lock_init(&cmd->stats[i].lock);
-
- sema_init(&cmd->sem, cmd->max_reg_cmds);
- sema_init(&cmd->pages_sem, 1);
- sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2));
-
- cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
if (cmd_l & 0xfff) {
mlx5_core_err(dev, "invalid command queue address\n");
err = -ENOMEM;
- goto err_free_page;
+ goto err_cmd_page;
}
+ cmd->checksum_disabled = 1;
- iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
- iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
-
- /* Make sure firmware sees the complete address before we proceed */
- wmb();
-
- mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
-
- cmd->mode = CMD_MODE_POLLING;
- cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
+ spin_lock_init(&cmd->alloc_lock);
+ spin_lock_init(&cmd->token_lock);
create_msg_cache(dev);
@@ -2279,16 +2221,14 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_cache;
}
- create_debugfs_files(dev);
+ mlx5_cmdif_debugfs_init(dev);
return 0;
err_cache:
destroy_msg_cache(dev);
-
-err_free_page:
+err_cmd_page:
free_cmd_page(dev, cmd);
-
err_free_pool:
dma_pool_destroy(cmd->pool);
return err;
@@ -2298,13 +2238,78 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
- clean_debug_files(dev);
+ mlx5_cmdif_debugfs_cleanup(dev);
destroy_workqueue(cmd->wq);
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
}
+int mlx5_cmd_enable(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ u32 cmd_h, cmd_l;
+
+ memset(&cmd->vars, 0, sizeof(cmd->vars));
+ cmd->vars.cmdif_rev = cmdif_rev(dev);
+ if (cmd->vars.cmdif_rev != CMD_IF_REV) {
+ mlx5_core_err(dev,
+ "Driver cmdif rev(%d) differs from firmware's(%d)\n",
+ CMD_IF_REV, cmd->vars.cmdif_rev);
+ return -EINVAL;
+ }
+
+ cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
+ cmd->vars.log_sz = cmd_l >> 4 & 0xf;
+ cmd->vars.log_stride = cmd_l & 0xf;
+ if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
+ mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
+ 1 << cmd->vars.log_sz);
+ return -EINVAL;
+ }
+
+ if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
+ mlx5_core_err(dev, "command queue size overflow\n");
+ return -EINVAL;
+ }
+
+ cmd->state = MLX5_CMDIF_STATE_DOWN;
+ cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
+ cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
+
+ sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
+ sema_init(&cmd->vars.pages_sem, 1);
+ sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ cmd_h = (u32)((u64)(cmd->dma) >> 32);
+ cmd_l = (u32)(cmd->dma);
+ if (WARN_ON(cmd_l & 0xfff))
+ return -EINVAL;
+
+ iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
+ iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
+
+ /* Make sure firmware sees the complete address before we proceed */
+ wmb();
+
+ mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
+
+ cmd->mode = CMD_MODE_POLLING;
+ cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
+
+ create_debugfs_files(dev);
+
+ return 0;
+}
+
+void mlx5_cmd_disable(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+
+ clean_debug_files(dev);
+ flush_workqueue(cmd->wq);
+}
+
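The queue-geometry state (log_sz, log_stride, bitmask, semaphores) now lives in cmd->vars and is reinitialized from mlx5_cmd_enable() on every device bring-up, while mlx5_cmd_init() keeps only the one-time allocations. The slot allocator itself is a counting semaphore bounding concurrency plus a bitmap scanned under a spinlock; a self-contained sketch of that scheme with illustrative example_* names:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

#define EXAMPLE_MAX_CMDS 31

static struct semaphore example_sem;
static DEFINE_SPINLOCK(example_alloc_lock);
static unsigned long example_bitmask;

static void example_cmd_enable(void)
{
	example_bitmask = (1UL << EXAMPLE_MAX_CMDS) - 1;	/* all slots free */
	sema_init(&example_sem, EXAMPLE_MAX_CMDS);
}

static int example_alloc_index(void)
{
	int idx;

	down(&example_sem);	/* blocks until a slot is guaranteed to exist */
	spin_lock(&example_alloc_lock);
	idx = find_first_bit(&example_bitmask, EXAMPLE_MAX_CMDS);
	if (idx < EXAMPLE_MAX_CMDS)
		clear_bit(idx, &example_bitmask);
	spin_unlock(&example_alloc_lock);
	return idx < EXAMPLE_MAX_CMDS ? idx : -ENOMEM;
}

static void example_free_index(int idx)
{
	spin_lock(&example_alloc_lock);
	set_bit(idx, &example_bitmask);
	spin_unlock(&example_alloc_lock);
	up(&example_sem);
}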
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 2138f28a2931..09652dc89115 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
int ret;
cmd = filp->private_data;
- weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
- field = cmd->max_reg_cmds - weight;
+ weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
+ field = cmd->vars.max_reg_cmds - weight;
ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -188,6 +188,24 @@ static const struct file_operations slots_fops = {
.read = slots_read,
};
+static struct mlx5_cmd_stats *
+mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode)
+{
+ struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ int err;
+
+ if (!stats)
+ return NULL;
+
+ err = xa_insert(stats_xa, opcode, stats, GFP_KERNEL);
+ if (err) {
+ kfree(stats);
+ return NULL;
+ }
+ spin_lock_init(&stats->lock);
+ return stats;
+}
+
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
@@ -200,10 +218,14 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
+ xa_init(&dev->cmd.stats);
+
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
- stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
+ stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i);
+ if (!stats)
+ continue;
stats->root = debugfs_create_dir(namep, *cmd);
debugfs_create_file("average", 0400, stats->root, stats,
@@ -224,7 +246,13 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_cmd_stats *stats;
+ unsigned long i;
+
debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
+ xa_for_each(&dev->cmd.stats, i, stats)
+ kfree(stats);
+ xa_destroy(&dev->cmd.stats);
}
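The per-opcode command statistics moved from a fixed stats[MLX5_CMD_OP_MAX] array into an xarray that is populated on demand, so xa_load() may return NULL and every caller in the cmd.c hunks above now checks for that. A condensed sketch of the allocate/lookup/cleanup pattern with illustrative example_* names:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct example_stats {
	u64 sum;
	u64 n;
};

static DEFINE_XARRAY(example_stats_xa);

static struct example_stats *example_get_or_alloc(unsigned long opcode)
{
	struct example_stats *s = xa_load(&example_stats_xa, opcode);

	if (s)
		return s;
	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return NULL;
	if (xa_insert(&example_stats_xa, opcode, s, GFP_KERNEL)) {
		kfree(s);	/* already present or out of memory */
		return xa_load(&example_stats_xa, opcode);
	}
	return s;
}

static void example_cleanup(void)
{
	struct example_stats *s;
	unsigned long opcode;

	xa_for_each(&example_stats_xa, opcode, s)
		kfree(s);
	xa_destroy(&example_stats_xa);
}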
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index edb06fb9bbc5..7909f378dc93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "devlink.h"
+#include "lag/lag.h"
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
@@ -587,10 +588,7 @@ static int next_phys_dev_lag(struct device *dev, const void *data)
if (!mdev)
return 0;
- if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
- !MLX5_CAP_GEN(mdev, lag_master) ||
- (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
- MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
+ if (!mlx5_lag_is_supported(mdev))
return 0;
return _next_phys_dev(mdev, data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3d82ec890666..af8460bb257b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -212,6 +212,9 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
ret = mlx5_load_one_devl_locked(dev, true);
+ if (ret)
+ return ret;
+ ret = mlx5_fw_reset_verify_fw_complete(dev, extack);
break;
default:
/* Unsupported action should not get to this function */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index defba5bd91d9..961f75da6227 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -6,6 +6,14 @@
#include <net/devlink.h>
+enum mlx5_devlink_resource_id {
+ MLX5_DL_RES_MAX_LOCAL_SFS = 1,
+ MLX5_DL_RES_MAX_EXTERNAL_SFS,
+
+ __MLX5_ID_RES_MAX,
+ MLX5_ID_RES_MAX = __MLX5_ID_RES_MAX - 1,
+};
+
enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b1807bfb815f..86f2690c5e01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -193,7 +193,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
+ min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS);
}
/* The maximum WQE size can be retrieved by max_wqe_sz_sq in
@@ -917,7 +917,7 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
struct mlx5e_macsec *macsec;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
@@ -1167,9 +1167,6 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
const u8 hfunc);
-int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rule_locs);
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 0107e4e73bb0..415840c3ef84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -18,6 +18,7 @@ void mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
+void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq);
int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5ce28ff7685f..e097f336e1c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,6 +6,7 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index b0b429a0321e..bb11e644d24f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -2,9 +2,12 @@
// Copyright (c) 2020 Mellanox Technologies
#include "en/ptp.h"
+#include "en/health.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -19,6 +22,48 @@ struct mlx5e_ptp_params {
struct mlx5e_rq_param rq_param;
};
+struct mlx5e_ptp_port_ts_cqe_tracker {
+ u8 metadata_id;
+ bool inuse : 1;
+ struct list_head entry;
+};
+
+struct mlx5e_ptp_port_ts_cqe_list {
+ struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
+ struct list_head tracker_list_head;
+ /* Sync list operations in xmit and napi_poll contexts */
+ spinlock_t tracker_list_lock;
+};
+
+static inline void
+mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
+{
+ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
+
+ WARN_ON_ONCE(tracker->inuse);
+ tracker->inuse = true;
+ spin_lock(&list->tracker_list_lock);
+ list_add_tail(&tracker->entry, &list->tracker_list_head);
+ spin_unlock(&list->tracker_list_lock);
+}
+
+static void
+mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
+{
+ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
+
+ WARN_ON_ONCE(!tracker->inuse);
+ tracker->inuse = false;
+ spin_lock(&list->tracker_list_lock);
+ list_del(&tracker->entry);
+ spin_unlock(&list->tracker_list_lock);
+}
+
+void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
+{
+ mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
+}
+
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
ktime_t port_hwtstamp;
@@ -79,75 +124,97 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
-#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
-
-static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id)
+static struct sk_buff *
+mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
- return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id));
+ return map->data[metadata];
}
-static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
+static struct sk_buff *
+mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
- u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
+ struct sk_buff *skb;
- if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci))
- return true;
+ skb = map->data[metadata];
+ map->data[metadata] = NULL;
- return false;
+ return skb;
}
-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci,
- u16 skb_id, int budget)
+static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
{
- struct skb_shared_hwtstamps hwts = {};
- struct sk_buff *skb;
+ /* Consider the queue unhealthy once more than 15/16 (size * 15 / 2^4) of the map cannot be reclaimed. */
+ return map->undelivered_counter > (map->capacity >> 4) * 15;
+}
- ptpsq->cq_stats->resync_event++;
+static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
+ ktime_t port_tstamp)
+{
+ struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
+ ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
+ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
+ struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
+
+ spin_lock(&cqe_list->tracker_list_lock);
+ list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
+ struct sk_buff *skb =
+ mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
+ ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+
+ if (!dma_tstamp ||
+ ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
+ break;
- while (skb_ci != skb_id) {
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
- hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
- skb_tstamp_tx(skb, &hwts);
- ptpsq->cq_stats->resync_cqe++;
- napi_consume_skb(skb, budget);
- skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ metadata_map->undelivered_counter++;
+ WARN_ON_ONCE(!pos->inuse);
+ pos->inuse = false;
+ list_del(&pos->entry);
}
+ spin_unlock(&cqe_list->tracker_list_lock);
}
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
int budget)
{
- u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
+ u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+ bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct sk_buff *skb;
ktime_t hwtstamp;
- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
- ptpsq->cq_stats->err_cqe++;
- goto out;
+ if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
+ mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
+ } else {
+ /* Reclaim space in the unlikely event the CQE was delivered after
+ * being marked late.
+ */
+ ptpsq->metadata_map.undelivered_counter--;
+ ptpsq->cq_stats->late_cqe++;
}
- if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) {
- if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
- /* already handled by a previous resync */
- ptpsq->cq_stats->ooo_cqe_drop++;
- return;
- }
- mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget);
+ skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);
+
+ if (unlikely(is_err_cqe)) {
+ ptpsq->cq_stats->err_cqe++;
+ goto out;
}
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
ptpsq->cq_stats->cqe++;
+ mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
+ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
+ if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
@@ -291,36 +358,86 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
- int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
- struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
+ struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
+ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
+ struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
+ int db_sz;
+ int md;
- ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
- GFP_KERNEL, numa);
- if (!ptpsq->skb_fifo.fifo)
+ cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
+ if (!cqe_list)
return -ENOMEM;
+ ptpsq->ts_cqe_pending_list = cqe_list;
+
+ db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
+ 1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
+ ts_cqe_metadata_size2wqe_counter));
+ ptpsq->ts_cqe_ctr_mask = db_sz - 1;
+
+ cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
+ GFP_KERNEL, numa);
+ if (!cqe_list->nodes)
+ goto free_cqe_list;
+ INIT_LIST_HEAD(&cqe_list->tracker_list_head);
+ spin_lock_init(&cqe_list->tracker_list_lock);
+
+ metadata_freelist->data =
+ kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
+ GFP_KERNEL, numa);
+ if (!metadata_freelist->data)
+ goto free_cqe_list_nodes;
+ metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;
+
+ for (md = 0; md < db_sz; ++md) {
+ cqe_list->nodes[md].metadata_id = md;
+ metadata_freelist->data[md] = md;
+ }
+ metadata_freelist->pc = db_sz;
+
+ metadata_map->data =
+ kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
+ GFP_KERNEL, numa);
+ if (!metadata_map->data)
+ goto free_metadata_freelist;
+ metadata_map->capacity = db_sz;
- ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
- ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
- ptpsq->skb_fifo.mask = wq_sz - 1;
- if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
- ptpsq->ts_cqe_ctr_mask =
- (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
return 0;
+
+free_metadata_freelist:
+ kvfree(metadata_freelist->data);
+free_cqe_list_nodes:
+ kvfree(cqe_list->nodes);
+free_cqe_list:
+ kvfree(cqe_list);
+ return -ENOMEM;
}
-static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
+static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
{
- while (*skb_fifo->pc != *skb_fifo->cc) {
- struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
+ int idx;
+
+ for (idx = 0; idx < map->capacity; ++idx) {
+ struct sk_buff *skb = map->data[idx];
dev_kfree_skb_any(skb);
}
}
-static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
+static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
{
- mlx5e_ptp_drain_skb_fifo(skb_fifo);
- kvfree(skb_fifo->fifo);
+ mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
+ kvfree(ptpsq->metadata_map.data);
+ kvfree(ptpsq->metadata_freelist.data);
+ kvfree(ptpsq->ts_cqe_pending_list->nodes);
+ kvfree(ptpsq->ts_cqe_pending_list);
+}
+
+static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
+{
+ struct mlx5e_ptpsq *ptpsq =
+ container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+
+ mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
@@ -348,11 +465,12 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
if (err)
goto err_free_txqsq;
- err = mlx5e_ptp_alloc_traffic_db(ptpsq,
- dev_to_node(mlx5_core_dma_dev(c->mdev)));
+ err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
if (err)
goto err_free_txqsq;
+ INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);
+
return 0;
err_free_txqsq:
@@ -366,7 +484,9 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct mlx5_core_dev *mdev = sq->mdev;
- mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
+ if (current_work() != &ptpsq->report_unhealthy_work)
+ cancel_work_sync(&ptpsq->report_unhealthy_work);
+ mlx5e_ptp_free_traffic_db(ptpsq);
cancel_work_sync(&sq->recover_work);
mlx5e_ptp_destroy_sq(mdev, sq->sqn);
mlx5e_free_txqsq_descs(sq);
@@ -534,7 +654,10 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
/* SQ */
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
- params->log_sq_size = orig->log_sq_size;
+ params->log_sq_size =
+ min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
+ MLX5E_PTP_MAX_LOG_SQ_SIZE);
+ params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
}
/* RQ */
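
The ptp.c changes above trigger a recovery only once the metadata map is considered unhealthy, i.e. when more than 15/16 of its capacity is held by undelivered port-timestamp CQEs. A minimal userspace sketch of that threshold arithmetic, with a made-up capacity rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the check in mlx5e_ptp_metadata_map_unhealthy(); the capacity
 * below is an arbitrary example, not taken from the hardware caps.
 */
static bool map_unhealthy(unsigned int undelivered, unsigned int capacity)
{
	return undelivered > (capacity >> 4) * 15;   /* > 15/16 of capacity */
}

int main(void)
{
	unsigned int capacity = 256;

	printf("threshold: %u\n", (capacity >> 4) * 15);                 /* 240 */
	printf("240 undelivered -> %d\n", map_unhealthy(240, capacity)); /* 0 */
	printf("241 undelivered -> %d\n", map_unhealthy(241, capacity)); /* 1 */
	return 0;
}
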
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index cc7efde88ac3..7b700d0f956a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -7,18 +7,38 @@
#include "en.h"
#include "en_stats.h"
#include "en/txrx.h"
+#include <linux/ktime.h>
#include <linux/ptp_classify.h>
+#include <linux/time64.h>
+#include <linux/workqueue.h>
#define MLX5E_PTP_CHANNEL_IX 0
+#define MLX5E_PTP_MAX_LOG_SQ_SIZE (8U)
+#define MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT (1 * NSEC_PER_SEC)
+
+struct mlx5e_ptp_metadata_fifo {
+ u8 cc;
+ u8 pc;
+ u8 mask;
+ u8 *data;
+};
+
+struct mlx5e_ptp_metadata_map {
+ u16 undelivered_counter;
+ u16 capacity;
+ struct sk_buff **data;
+};
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
struct mlx5e_cq ts_cq;
- u16 skb_fifo_cc;
- u16 skb_fifo_pc;
- struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_ptp_cq_stats *cq_stats;
u16 ts_cqe_ctr_mask;
+
+ struct work_struct report_unhealthy_work;
+ struct mlx5e_ptp_port_ts_cqe_list *ts_cqe_pending_list;
+ struct mlx5e_ptp_metadata_fifo metadata_freelist;
+ struct mlx5e_ptp_metadata_map metadata_map;
};
enum {
@@ -69,12 +89,35 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
fk.ports.dst == htons(PTP_EV_PORT));
}
-static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq)
+static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *fifo, u8 metadata)
{
- if (!sq->ptpsq)
- return true;
+ fifo->data[fifo->mask & fifo->pc++] = metadata;
+}
+
+static inline u8
+mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+ return fifo->data[fifo->mask & fifo->cc++];
+}
- return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo);
+static inline void
+mlx5e_ptp_metadata_map_put(struct mlx5e_ptp_metadata_map *map,
+ struct sk_buff *skb, u8 metadata)
+{
+ WARN_ON_ONCE(map->data[metadata]);
+ map->data[metadata] = skb;
+}
+
+static inline bool mlx5e_ptpsq_metadata_freelist_empty(struct mlx5e_ptpsq *ptpsq)
+{
+ struct mlx5e_ptp_metadata_fifo *freelist;
+
+ if (likely(!ptpsq))
+ return false;
+
+ freelist = &ptpsq->metadata_freelist;
+
+ return freelist->pc == freelist->cc;
}
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
@@ -89,6 +132,8 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
const struct mlx5e_profile *profile);
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
+void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata);
+
enum {
MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1),
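
The metadata freelist above is a small power-of-two ring of u8 identifiers: push() writes at the producer counter, pop() reads at the consumer counter, and the list is empty once the two counters meet. A self-contained sketch of the same scheme (names and sizes here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct meta_fifo {
	uint8_t cc;    /* consumer counter */
	uint8_t pc;    /* producer counter */
	uint8_t mask;  /* capacity - 1, capacity being a power of two */
	uint8_t *data;
};

static void meta_fifo_push(struct meta_fifo *f, uint8_t id)
{
	f->data[f->mask & f->pc++] = id;
}

static uint8_t meta_fifo_pop(struct meta_fifo *f)
{
	return f->data[f->mask & f->cc++];
}

static bool meta_fifo_empty(const struct meta_fifo *f)
{
	return f->pc == f->cc;
}

int main(void)
{
	struct meta_fifo f = { .mask = 7, .data = calloc(8, 1) };
	uint8_t id;

	for (id = 0; id < 8; id++)      /* seed the free list with every id */
		meta_fifo_push(&f, id);

	while (!meta_fifo_empty(&f))
		(void)meta_fifo_pop(&f); /* ids return in FIFO order */

	free(f.data);
	return 0;
}
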
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 1874c2f0587f..244bc15a42ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -379,9 +379,9 @@ int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_
if (!htb && htb_qopt->command != TC_HTB_CREATE)
return -EINVAL;
- if (htb_qopt->prio) {
+ if (htb_qopt->prio || htb_qopt->quantum) {
NL_SET_ERR_MSG_MOD(htb_qopt->extack,
- "prio parameter is not supported by device with HTB offload enabled.");
+ "prio and quantum parameters are not supported by device with HTB offload enabled.");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 560800246573..0fef853eab62 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -77,6 +77,10 @@ mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_es
return NULL;
priv = netdev_priv(dev);
+
+ if (!priv->mdev->priv.eswitch->br_offloads)
+ return NULL;
+
rpriv = priv->ppriv;
*vport_num = rpriv->rep->vport;
*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index b5c773ffc763..b12fe3c5a258 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -715,9 +715,20 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
uplink_priv = &uplink_rpriv->uplink_priv;
ct_priv = uplink_priv->ct_priv;
- if (!mlx5_ipsec_is_rx_flow(cqe) &&
- !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
- &tc_priv))
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) {
+ u32 mapped_id;
+ u32 metadata;
+
+ mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK;
+ if (mapped_id &&
+ !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata))
+ mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata);
+ }
+#endif
+
+ if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv,
+ zone_restore_id, tunnel_id, &tc_priv))
goto free_skb;
forward:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index b35ff289af49..ff8242f67c54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -164,6 +164,43 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
+static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
+{
+ struct mlx5e_ptpsq *ptpsq = ctx;
+ struct mlx5e_channels *chs;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int carrier_ok;
+ int err;
+
+ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
+ return 0;
+
+ priv = ptpsq->txqsq.priv;
+
+ mutex_lock(&priv->state_lock);
+ chs = &priv->channels;
+ netdev = priv->netdev;
+
+ carrier_ok = netif_carrier_ok(netdev);
+ netif_carrier_off(netdev);
+
+ mlx5e_deactivate_priv_channels(priv);
+
+ mlx5e_ptp_close(chs->ptp);
+ err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+
+ mlx5e_activate_priv_channels(priv);
+
+ /* restore carrier if needed */
+ if (carrier_ok)
+ netif_carrier_on(netdev);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
/* state lock cannot be grabbed within this function.
* It can cause a dead lock or a read-after-free.
*/
@@ -516,6 +553,15 @@ static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlin
return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
}
+static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_ptpsq *ptpsq = ctx;
+
+ return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
+}
+
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
@@ -621,6 +667,25 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
return to_ctx.status;
}
+void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
+{
+ struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
+ struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
+ struct mlx5e_priv *priv = txqsq->priv;
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = ptpsq;
+ err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
+ err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
+ snprintf(err_str, sizeof(err_str),
+ "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u",
+ txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
+
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index e1095bc36543..56e6b8c7501f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -218,17 +218,32 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
}
-u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
+int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
+ enum mlx5_traffic_types tt)
{
- struct mlx5e_rss *rss = res->rss[0];
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -ENOENT;
return mlx5e_rss_get_hash_fields(rss, tt);
}
-int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
- u8 rx_hash_fields)
+int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
+ enum mlx5_traffic_types tt, u8 rx_hash_fields)
{
- struct mlx5e_rss *rss = res->rss[0];
+ struct mlx5e_rss *rss;
+
+ if (rss_idx >= MLX5E_MAX_NUM_RSS)
+ return -EINVAL;
+
+ rss = res->rss[rss_idx];
+ if (!rss)
+ return -ENOENT;
return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 5d5f64fab60f..580fe8bc3cd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -48,9 +48,10 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
const u32 *indir, const u8 *key, const u8 *hfunc);
-u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
-int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
- u8 rx_hash_fields);
+int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
+ enum mlx5_traffic_types tt);
+int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
+ enum mlx5_traffic_types tt, u8 rx_hash_fields);
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
struct mlx5e_packet_merge_param *pkt_merge_param);
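
With the per-context signatures above, callers now pass an RSS context index and check a status code instead of assuming context 0. A hedged sketch of the new calling convention (the wrapper below is illustrative; only the mlx5e_rx_res_rss_get_hash_fields() call reflects the interface in this patch):

/* Illustrative caller, not part of the driver. A negative return is an
 * errno (-EINVAL for a bad index, -ENOENT for a missing context); a
 * non-negative return carries the u8 hash-field mask.
 */
static int example_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				   enum mlx5_traffic_types tt, u8 *fields)
{
	int ret = mlx5e_rx_res_rss_get_hash_fields(res, rss_idx, tt);

	if (ret < 0)
		return ret;

	*fields = ret;
	return 0;
}
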
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 2b80fe73549d..8c531f4ec912 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -221,16 +221,21 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
}
static inline bool
-mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
+mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
{
-#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
- const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
- const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
- const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
- const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
- const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
+ const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
+ DISS_BIT(META);
+ const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
@@ -247,7 +252,7 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
struct flow_match_tcp tcp;
if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
- ct_dbg("rule uses unexpected dissectors (0x%08x)",
+ ct_dbg("rule uses unexpected dissectors (0x%016llx)",
flow_rule->match.dissector->used_keys);
return false;
}
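
The move from BIT() to BIT_ULL() above follows used_keys widening to u64: flow dissector key indices can now exceed 31, and a 32-bit shift would overflow there. A small standalone illustration (the key index below is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)     (1UL << (n))   /* only 32 bits wide on 32-bit targets */
#define BIT_ULL(n) (1ULL << (n))  /* always 64 bits wide */

int main(void)
{
	unsigned int key = 33;                    /* hypothetical index > 31 */
	uint64_t mask = BIT_ULL(key);

	printf("BIT_ULL(%u) = 0x%016llx\n", key, (unsigned long long)mask);
	/* With a 32-bit wide BIT(), a shift by 33 is undefined behaviour,
	 * hence the 64-bit helpers once used_keys became a u64.
	 */
	return 0;
}
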
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 201ac7dd338f..5620d9f97518 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies */
-#include <net/page_pool.h>
#include "en/txrx.h"
#include "en/params.h"
#include "en/trap.h"
@@ -128,7 +127,7 @@ static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
{
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+ int cpu = mlx5_comp_vector_get_cpu(priv->mdev, 0);
struct net_device *netdev = priv->netdev;
struct mlx5e_trap *t;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 40589cebb773..12f56d0db0af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -35,6 +35,7 @@
#include "en/xdp.h"
#include "en/params.h"
#include <linux/bitfield.h>
+#include <net/page_pool/helpers.h>
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index bac4717548c6..caa34b9c161e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -138,7 +138,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -173,7 +173,7 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 891d39b4bfd4..7d4ceb9b9c16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -38,8 +38,10 @@
#include <net/netevent.h>
#include "en.h"
+#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
+#include "en_rep.h"
#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1
@@ -354,6 +356,12 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
mlx5e_ipsec_init_limits(sa_entry, attrs);
mlx5e_ipsec_init_macs(sa_entry, attrs);
+
+ if (x->encap) {
+ attrs->encap = true;
+ attrs->sport = x->encap->encap_sport;
+ attrs->dport = x->encap->encap_dport;
+ }
}
static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
@@ -387,8 +395,25 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
if (x->encap) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state may not be offloaded");
- return -EINVAL;
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ return -EINVAL;
+ }
+
+ if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
+ return -EINVAL;
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
+ return -EINVAL;
+ }
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT) {
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
+ return -EINVAL;
+ }
}
if (!x->aead) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
@@ -416,9 +441,9 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->sel.proto != IPPROTO_IP &&
- (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) {
- NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+ if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
+ x->sel.proto != IPPROTO_TCP) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
return -EINVAL;
}
@@ -646,6 +671,11 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_xfrm;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto err_xfrm;
+ }
+
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -654,7 +684,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
err = mlx5_ipsec_create_work(sa_entry);
if (err)
- goto err_xfrm;
+ goto unblock_ipsec;
err = mlx5e_ipsec_create_dwork(sa_entry);
if (err)
@@ -711,6 +741,8 @@ release_work:
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+unblock_ipsec:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
kfree(sa_entry);
NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
@@ -740,6 +772,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
goto sa_entry_free;
@@ -756,6 +789,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+ mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
kfree(sa_entry);
}
@@ -835,6 +869,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
goto clear_aso;
}
+ ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
goto err_fs_init;
@@ -958,9 +993,10 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (sel->proto != IPPROTO_IP &&
- (sel->proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
- NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+ if (x->selector.proto != IPPROTO_IP &&
+ x->selector.proto != IPPROTO_UDP &&
+ x->selector.proto != IPPROTO_TCP) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
return -EINVAL;
}
@@ -1029,6 +1065,11 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
pol_entry->x = x;
pol_entry->ipsec = priv->ipsec;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto ipsec_busy;
+ }
+
mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
if (err)
@@ -1038,6 +1079,8 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
return 0;
err_fs:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
+ipsec_busy:
kfree(pol_entry);
NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
@@ -1048,6 +1091,7 @@ static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+ mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}
static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 4e9887171508..9e7c42c2f77b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -94,13 +94,20 @@ struct mlx5_accel_esp_xfrm_attrs {
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
+ u8 encap : 1;
u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
struct mlx5_ipsec_lft lft;
- u8 smac[ETH_ALEN];
- u8 dmac[ETH_ALEN];
+ union {
+ u8 smac[ETH_ALEN];
+ __be16 sport;
+ };
+ union {
+ u8 dmac[ETH_ALEN];
+ __be16 dport;
+ };
};
enum mlx5_ipsec_cap {
@@ -110,6 +117,7 @@ enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_ROCE = 1 << 3,
MLX5_IPSEC_CAP_PRIO = 1 << 4,
MLX5_IPSEC_CAP_TUNNEL = 1 << 5,
+ MLX5_IPSEC_CAP_ESPINUDP = 1 << 6,
};
struct mlx5e_priv;
@@ -135,7 +143,7 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_trailer;
};
-struct mlx5e_ipsec_rx;
+struct mlx5e_ipsec_fc;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec_work {
@@ -161,6 +169,58 @@ struct mlx5e_ipsec_aso {
spinlock_t lock;
};
+struct mlx5e_ipsec_rx_create_attr {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_ttc_table *ttc;
+ u32 family;
+ int prio;
+ int pol_level;
+ int sa_level;
+ int status_level;
+ enum mlx5_flow_namespace_type chains_ns;
+};
+
+struct mlx5e_ipsec_ft {
+ struct mutex mutex; /* Protect changes to this struct */
+ struct mlx5_flow_table *pol;
+ struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *status;
+ u32 refcnt;
+};
+
+struct mlx5e_ipsec_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_fc *fc;
+};
+
+struct mlx5e_ipsec_miss {
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_miss status_drop;
+ struct mlx5_fc *status_drop_cnt;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
+ struct xarray ipsec_obj_id_map;
+};
+
+struct mlx5e_ipsec_tx_create_attr {
+ int prio;
+ int pol_level;
+ int sa_level;
+ int cnt_level;
+ enum mlx5_flow_namespace_type chains_ns;
+};
+
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
struct xarray sadb;
@@ -170,11 +230,14 @@ struct mlx5e_ipsec {
struct mlx5e_flow_steering *fs;
struct mlx5e_ipsec_rx *rx_ipv4;
struct mlx5e_ipsec_rx *rx_ipv6;
+ struct mlx5e_ipsec_rx *rx_esw;
struct mlx5e_ipsec_tx *tx;
+ struct mlx5e_ipsec_tx *tx_esw;
struct mlx5e_ipsec_aso *aso;
struct notifier_block nb;
struct notifier_block netevent_nb;
struct mlx5_ipsec_fs *roce;
+ u8 is_uplink_rep: 1;
};
struct mlx5e_ipsec_esn_state {
@@ -183,13 +246,6 @@ struct mlx5e_ipsec_esn_state {
u8 overlap: 1;
};
-struct mlx5e_ipsec_rule {
- struct mlx5_flow_handle *rule;
- struct mlx5_modify_hdr *modify_hdr;
- struct mlx5_pkt_reformat *pkt_reformat;
- struct mlx5_fc *fc;
-};
-
struct mlx5e_ipsec_limits {
u64 round;
u8 soft_limit_hit : 1;
@@ -209,6 +265,7 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_work *work;
struct mlx5e_ipsec_dwork *dwork;
struct mlx5e_ipsec_limits limits;
+ u32 rx_mapped_id;
};
struct mlx5_accel_pol_xfrm_attrs {
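
The unions added above overlay the NAT-T UDP ports on the space already reserved for the L2 addresses, since a state needs either the MAC addresses or the encapsulation ports but never both. A standalone sketch of the idea (field and struct names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

struct esp_attrs_sketch {
	union {
		uint8_t  smac[ETH_ALEN];
		uint16_t sport;          /* __be16 in the driver */
	};
	union {
		uint8_t  dmac[ETH_ALEN];
		uint16_t dport;
	};
};

int main(void)
{
	/* Each union stays ETH_ALEN bytes wide, so the ports cost no space. */
	printf("struct size = %zu\n", sizeof(struct esp_attrs_sketch));
	return 0;
}
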
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 832d36be4a17..7dba4221993f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -9,6 +9,8 @@
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
+#include "esw/ipsec_fs.h"
+#include "en_rep.h"
#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
@@ -19,32 +21,10 @@ struct mlx5e_ipsec_fc {
struct mlx5_fc *drop;
};
-struct mlx5e_ipsec_ft {
- struct mutex mutex; /* Protect changes to this struct */
- struct mlx5_flow_table *pol;
- struct mlx5_flow_table *sa;
- struct mlx5_flow_table *status;
- u32 refcnt;
-};
-
-struct mlx5e_ipsec_miss {
- struct mlx5_flow_group *group;
- struct mlx5_flow_handle *rule;
-};
-
-struct mlx5e_ipsec_rx {
- struct mlx5e_ipsec_ft ft;
- struct mlx5e_ipsec_miss pol;
- struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_fc *fc;
- struct mlx5_fs_chains *chains;
- u8 allow_tunnel_mode : 1;
-};
-
struct mlx5e_ipsec_tx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
struct mlx5e_ipsec_rule status;
struct mlx5_flow_namespace *ns;
struct mlx5e_ipsec_fc *fc;
@@ -60,14 +40,25 @@ static enum mlx5_traffic_types family2tt(u32 family)
return MLX5_TT_IPV6_IPSEC_ESP;
}
-static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family)
+static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
+ if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+ return ipsec->rx_esw;
+
if (family == AF_INET)
return ipsec->rx_ipv4;
return ipsec->rx_ipv6;
}
+static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
+{
+ if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+ return ipsec->tx_esw;
+
+ return ipsec->tx;
+}
+
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
enum mlx5_flow_namespace_type ns, int base_prio,
@@ -238,13 +229,19 @@ out:
return err;
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx, u32 family)
+static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
- /* disconnect */
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
+}
+
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
+{
+ /* disconnect */
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, family);
if (rx->chains) {
ipsec_chains_destroy(rx->chains);
@@ -259,51 +256,105 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
+ } else {
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ }
mlx5_destroy_flow_table(rx->ft.status);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
}
+static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ u32 family,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ if (rx == ipsec->rx_esw) {
+ /* For packet offload in switchdev mode, RX & TX use FDB namespace */
+ attr->ns = ipsec->tx_esw->ns;
+ mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
+ return;
+ }
+
+ attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ attr->family = family;
+ attr->prio = MLX5E_NIC_PRIO;
+ attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
+ attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
+}
+
+static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (rx == ipsec->rx_esw)
+ return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
+
+ *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
+ err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
+ attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
+ attr->prio);
+ if (err)
+ return err;
+
+ ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
+ if (ft) {
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = ft;
+ }
+
+ return 0;
+}
+
+static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ struct mlx5_flow_destination dest = {};
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx->ft.pol;
+ mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
+}
+
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
- struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
- struct mlx5_flow_destination default_dest;
+ struct mlx5e_ipsec_rx_create_attr attr;
struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
- default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
- err = mlx5_ipsec_fs_roce_rx_create(mdev, ipsec->roce, ns, &default_dest,
- family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
- MLX5E_NIC_PRIO);
+ ipsec_rx_create_attr_set(ipsec, rx, family, &attr);
+
+ err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
if (err)
return err;
- ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
- MLX5E_NIC_PRIO, 1, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
}
-
rx->ft.status = ft;
- ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
- if (ft) {
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = ft;
- } else {
- dest[0] = default_dest;
- }
-
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- err = ipsec_status_rule(mdev, rx, dest);
+ if (rx == ipsec->rx_esw)
+ err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
+ else
+ err = ipsec_status_rule(mdev, rx, dest);
if (err)
goto err_add;
@@ -312,8 +363,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO, 2,
- flags);
+ ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -326,9 +376,9 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
- MLX5_FLOW_NAMESPACE_KERNEL,
- MLX5E_NIC_PRIO,
- MLX5E_ACCEL_FS_POL_FT_LEVEL,
+ attr.chains_ns,
+ attr.prio,
+ attr.pol_level,
&rx->ft.pol);
if (IS_ERR(rx->chains)) {
err = PTR_ERR(rx->chains);
@@ -338,8 +388,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
goto connect;
}
- ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
- 2, 0);
+ ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -354,10 +403,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
connect:
/* connect */
- memset(dest, 0x00, sizeof(*dest));
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = rx->ft.pol;
- mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest[0]);
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;
err_pol_miss:
@@ -387,10 +434,16 @@ static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (rx->ft.refcnt)
goto skip;
- err = rx_create(mdev, ipsec, rx, family);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
return err;
+ err = rx_create(mdev, ipsec, rx, family);
+ if (err) {
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
+ }
+
skip:
rx->ft.refcnt++;
return 0;
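
rx_get() above (and the matching tx_get() change later in this file) keeps eswitch mode changes blocked for as long as the refcounted flow tables exist, unblocking on the error path and again on the final put. A minimal sketch of that ordering with placeholder helpers standing in for the mlx5 calls:

/* Sketch of the get-side ordering only; the stubs below exist solely to
 * keep the example compilable and stand in for mlx5_eswitch_block_mode(),
 * rx_create()/tx_create() and mlx5_eswitch_unblock_mode().
 */
struct ft_ctx {
	unsigned int refcnt;
};

static int block_mode(void) { return 0; }
static void unblock_mode(void) { }
static int create_tables(struct ft_ctx *ctx) { (void)ctx; return 0; }

static int ft_get(struct ft_ctx *ctx)
{
	int err;

	if (ctx->refcnt)
		goto skip;

	err = block_mode();
	if (err)
		return err;

	err = create_tables(ctx);
	if (err) {
		unblock_mode();    /* undo the block if creation fails */
		return err;
	}

skip:
	ctx->refcnt++;
	return 0;
}
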
@@ -403,12 +456,14 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
return;
rx_destroy(ipsec->mdev, ipsec, rx, family);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec *ipsec, u32 family)
+ struct mlx5e_ipsec *ipsec, u32 family,
+ int type)
{
- struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
int err;
mutex_lock(&rx->ft.mutex);
@@ -422,9 +477,9 @@ static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
struct mlx5e_ipsec *ipsec,
- u32 family, u32 prio)
+ u32 family, u32 prio, int type)
{
- struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
struct mlx5_flow_table *ft;
int err;
@@ -449,18 +504,18 @@ err_get:
return ERR_PTR(err);
}
-static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family)
+static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
- struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
mutex_lock(&rx->ft.mutex);
rx_put(ipsec, rx, family);
mutex_unlock(&rx->ft.mutex);
}
-static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio)
+static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
- struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
mutex_lock(&rx->ft.mutex);
if (rx->chains)
@@ -504,7 +559,7 @@ err_rule:
}
/* IPsec TX flow steering */
-static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
struct mlx5_ipsec_fs *roce)
{
mlx5_ipsec_fs_roce_tx_destroy(roce);
@@ -516,22 +571,45 @@ static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
mlx5_destroy_flow_table(tx->ft.pol);
}
+ if (tx == ipsec->tx_esw) {
+ mlx5_del_flow_rules(tx->sa.rule);
+ mlx5_destroy_flow_group(tx->sa.group);
+ }
mlx5_destroy_flow_table(tx->ft.sa);
if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
+ mlx5_eswitch_unblock_encap(ipsec->mdev);
mlx5_del_flow_rules(tx->status.rule);
mlx5_destroy_flow_table(tx->ft.status);
}
-static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx *tx,
+ struct mlx5e_ipsec_tx_create_attr *attr)
+{
+ if (tx == ipsec->tx_esw) {
+ mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
+ return;
+ }
+
+ attr->prio = 0;
+ attr->pol_level = 0;
+ attr->sa_level = 1;
+ attr->cnt_level = 2;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
+}
+
+static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
struct mlx5_ipsec_fs *roce)
{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_tx_create_attr attr;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
- ft = ipsec_ft_create(tx->ns, 2, 0, 1, 0);
+ ipsec_tx_create_attr_set(ipsec, tx, &attr);
+ ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -544,16 +622,25 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(tx->ns, 1, 0, 4, flags);
+ ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
}
tx->ft.sa = ft;
+ if (tx == ipsec->tx_esw) {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = MLX5_VPORT_UPLINK;
+ err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
+ if (err)
+ goto err_sa_miss;
+ memset(&dest, 0, sizeof(dest));
+ }
+
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
tx->chains = ipsec_chains_create(
- mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 0,
+ mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
&tx->ft.pol);
if (IS_ERR(tx->chains)) {
err = PTR_ERR(tx->chains);
@@ -563,7 +650,7 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, 0, 0, 2, 0);
+ ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -592,6 +679,11 @@ err_roce:
mlx5_destroy_flow_table(tx->ft.pol);
}
err_pol_ft:
+ if (tx == ipsec->tx_esw) {
+ mlx5_del_flow_rules(tx->sa.rule);
+ mlx5_destroy_flow_group(tx->sa.group);
+ }
+err_sa_miss:
mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
if (tx->allow_tunnel_mode)
@@ -602,6 +694,25 @@ err_status_rule:
return err;
}
+static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_table *ft)
+{
+#ifdef CONFIG_MLX5_ESWITCH
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_priv *priv;
+
+ esw->offloads.ft_ipsec_tx_pol = ft;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ priv = netdev_priv(uplink_rpriv->netdev);
+ if (!priv->channels.num)
+ return;
+
+ mlx5e_rep_deactivate_channels(priv);
+ mlx5e_rep_activate_channels(priv);
+#endif
+}
+
static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx *tx)
{
@@ -610,10 +721,19 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (tx->ft.refcnt)
goto skip;
- err = tx_create(mdev, tx, ipsec->roce);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
return err;
+ err = tx_create(ipsec, tx, ipsec->roce);
+ if (err) {
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
+ }
+
+ if (tx == ipsec->tx_esw)
+ ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
+
skip:
tx->ft.refcnt++;
return 0;
@@ -624,14 +744,20 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
if (--tx->ft.refcnt)
return;
- tx_destroy(ipsec->mdev, tx, ipsec->roce);
+ if (tx == ipsec->tx_esw) {
+ mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
+ ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
+ }
+
+ tx_destroy(ipsec, tx, ipsec->roce);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
struct mlx5e_ipsec *ipsec,
- u32 prio)
+ u32 prio, int type)
{
- struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
struct mlx5_flow_table *ft;
int err;
@@ -657,9 +783,9 @@ err_get:
}
static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
- struct mlx5e_ipsec *ipsec)
+ struct mlx5e_ipsec *ipsec, int type)
{
- struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
int err;
mutex_lock(&tx->ft.mutex);
@@ -671,18 +797,18 @@ static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
return tx;
}
-static void tx_ft_put(struct mlx5e_ipsec *ipsec)
+static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
- struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
mutex_lock(&tx->ft.mutex);
tx_put(ipsec, tx);
mutex_unlock(&tx->ft.mutex);
}
-static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
+static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
- struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
mutex_lock(&tx->ft.mutex);
if (tx->chains)
@@ -782,43 +908,75 @@ static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}
-static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
+static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
{
/* Pass policy check before choosing this SA */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- MLX5_SET(fte_match_param, spec->match_criteria,
- misc_parameters_2.metadata_reg_c_0, reqid);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_0, reqid);
+ misc_parameters_2.metadata_reg_c_4, reqid);
}
static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
- if (upspec->proto != IPPROTO_UDP)
+ switch (upspec->proto) {
+ case IPPROTO_UDP:
+ if (upspec->dport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+ udp_dport, upspec->dport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ udp_dport, upspec->dport);
+ }
+ if (upspec->sport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+ udp_sport, upspec->sport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ udp_sport, upspec->sport);
+ }
+ break;
+ case IPPROTO_TCP:
+ if (upspec->dport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+ tcp_dport, upspec->dport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ tcp_dport, upspec->dport);
+ }
+ if (upspec->sport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
+ tcp_sport, upspec->sport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ tcp_sport, upspec->sport);
+ }
+ break;
+ default:
return;
+ }
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
- if (upspec->dport) {
- MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
- upspec->dport_mask);
- MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
- }
+}
- if (upspec->sport) {
- MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport,
- upspec->sport_mask);
- MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport);
- }
+static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
+ int type, u8 dir)
+{
+ if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+ return MLX5_FLOW_NAMESPACE_FDB;
+
+ if (dir == XFRM_DEV_OFFLOAD_IN)
+ return MLX5_FLOW_NAMESPACE_KERNEL;
+
+ return MLX5_FLOW_NAMESPACE_EGRESS;
}
-static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
struct mlx5_flow_act *flow_act)
{
+ enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
- enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
@@ -826,12 +984,10 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
case XFRM_DEV_OFFLOAD_IN:
MLX5_SET(set_action_in, action, field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
break;
case XFRM_DEV_OFFLOAD_OUT:
MLX5_SET(set_action_in, action, field,
- MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
- ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
break;
default:
return -EINVAL;
@@ -951,37 +1107,70 @@ free_reformatbf:
return -EINVAL;
}
+static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ switch (attrs->dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ if (attrs->encap)
+ return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
+ return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+ case XFRM_DEV_OFFLOAD_OUT:
+ if (attrs->family == AF_INET) {
+ if (attrs->encap)
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+ }
+
+ if (attrs->encap)
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
+ return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+ default:
+ WARN_ON(true);
+ }
+
+ return -EINVAL;
+}
+
static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5_pkt_reformat_params *reformat_params)
{
- u8 *reformatbf;
+ struct udphdr *udphdr;
+ char *reformatbf;
+ size_t bfflen;
__be32 spi;
+ void *hdr;
+
+ reformat_params->type = get_reformat_type(attrs);
+ if (reformat_params->type < 0)
+ return reformat_params->type;
switch (attrs->dir) {
case XFRM_DEV_OFFLOAD_IN:
- reformat_params->type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
break;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET)
- reformat_params->type =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
- else
- reformat_params->type =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
-
- reformatbf = kzalloc(MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE,
- GFP_KERNEL);
+ bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
+ if (attrs->encap)
+ bfflen += sizeof(*udphdr);
+
+ reformatbf = kzalloc(bfflen, GFP_KERNEL);
if (!reformatbf)
return -ENOMEM;
+ hdr = reformatbf;
+ if (attrs->encap) {
+ udphdr = (struct udphdr *)reformatbf;
+ udphdr->source = attrs->sport;
+ udphdr->dest = attrs->dport;
+ hdr += sizeof(*udphdr);
+ }
+
/* convert to network format */
spi = htonl(attrs->spi);
- memcpy(reformatbf, &spi, sizeof(spi));
+ memcpy(hdr, &spi, sizeof(spi));
reformat_params->param_0 = attrs->authsize;
- reformat_params->size =
- MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
+ reformat_params->size = bfflen;
reformat_params->data = reformatbf;
break;
default:
@@ -991,26 +1180,17 @@ setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
return 0;
}
-static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
+static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5_flow_act *flow_act)
{
+ enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
+ attrs->dir);
struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_pkt_reformat *pkt_reformat;
- enum mlx5_flow_namespace_type ns_type;
int ret;
- switch (attrs->dir) {
- case XFRM_DEV_OFFLOAD_IN:
- ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
- break;
- case XFRM_DEV_OFFLOAD_OUT:
- ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
- break;
- default:
- return -EINVAL;
- }
-
switch (attrs->mode) {
case XFRM_MODE_TRANSPORT:
ret = setup_pkt_transport_reformat(attrs, &reformat_params);
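
For ESP-in-UDP the transport reformat buffer built above grows by a UDP header carrying the negotiated ports, with the SPI following immediately in network byte order. A short userspace check of the resulting sizes and offsets (only the 16-byte MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE value is taken from the file above; the rest is illustrative):

#include <netinet/udp.h>   /* struct udphdr, 8 bytes */
#include <stddef.h>
#include <stdio.h>

#define ADD_ESP_TRANSPORT_SIZE 16   /* mirrors the driver constant */

int main(void)
{
	size_t plain = ADD_ESP_TRANSPORT_SIZE;
	size_t encap = ADD_ESP_TRANSPORT_SIZE + sizeof(struct udphdr);

	printf("plain transport: %zu bytes, SPI at offset 0\n", plain);
	printf("ESP-in-UDP:      %zu bytes, udphdr first, SPI at offset %zu\n",
	       encap, sizeof(struct udphdr));
	return 0;
}
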
@@ -1047,9 +1227,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_flow_spec *spec;
struct mlx5e_ipsec_rx *rx;
struct mlx5_fc *counter;
- int err;
+ int err = 0;
- rx = rx_ft_get(mdev, ipsec, attrs->family);
+ rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
if (IS_ERR(rx))
return PTR_ERR(rx);
@@ -1067,15 +1247,21 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_spi(spec, attrs->spi);
setup_fte_esp(spec);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
+
+ if (rx != ipsec->rx_esw)
+ err = setup_modify_header(ipsec, attrs->type,
+ sa_entry->ipsec_obj_id | BIT(31),
+ XFRM_DEV_OFFLOAD_IN, &flow_act);
+ else
+ err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
- err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
- XFRM_DEV_OFFLOAD_IN, &flow_act);
if (err)
goto err_mod_header;
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_PACKET:
- err = setup_pkt_reformat(mdev, attrs, &flow_act);
+ err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
break;
@@ -1125,7 +1311,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(ipsec, attrs->family);
+ rx_ft_put(ipsec, attrs->family, attrs->type);
return err;
}
@@ -1142,7 +1328,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_fc *counter;
int err;
- tx = tx_ft_get(mdev, ipsec);
+ tx = tx_ft_get(mdev, ipsec, attrs->type);
if (IS_ERR(tx))
return PTR_ERR(tx);
@@ -1168,8 +1354,8 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
break;
case XFRM_DEV_OFFLOAD_PACKET:
if (attrs->reqid)
- setup_fte_reg_c0(spec, attrs->reqid);
- err = setup_pkt_reformat(mdev, attrs, &flow_act);
+ setup_fte_reg_c4(spec, attrs->reqid);
+ err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
break;
@@ -1218,7 +1404,7 @@ err_add_cnt:
err_pkt_reformat:
kvfree(spec);
err_alloc:
- tx_ft_put(ipsec);
+ tx_ft_put(ipsec, attrs->type);
return err;
}
@@ -1226,15 +1412,16 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
- struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
+ struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
+ struct mlx5e_ipsec_tx *tx;
int err, dstn = 0;
- ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
+ ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -1244,6 +1431,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_alloc;
}
+ tx = ipsec_tx(ipsec, attrs->type);
if (attrs->family == AF_INET)
setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
else
@@ -1258,7 +1446,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
if (!attrs->reqid)
break;
- err = setup_modify_header(mdev, attrs->reqid,
+ err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
XFRM_DEV_OFFLOAD_OUT, &flow_act);
if (err)
goto err_mod_header;
@@ -1277,6 +1465,8 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
flow_act.flags |= FLOW_ACT_NO_APPEND;
+ if (tx == ipsec->tx_esw && tx->chains)
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[dstn].ft = tx->ft.sa;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dstn++;
@@ -1298,7 +1488,7 @@ err_action:
err_mod_header:
kvfree(spec);
err_alloc:
- tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
+ tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
return err;
}
@@ -1306,6 +1496,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+ struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
struct mlx5_flow_destination dest[2];
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -1314,11 +1505,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
+ attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
- rx = ipsec_rx(pol_entry->ipsec, attrs->family);
+ rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1332,6 +1524,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->action) {
case XFRM_POLICY_ALLOW:
@@ -1350,6 +1543,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
flow_act.flags |= FLOW_ACT_NO_APPEND;
+ if (rx == ipsec->rx_esw && rx->chains)
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dstn].ft = rx->ft.sa;
dstn++;
@@ -1367,88 +1562,110 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
return err;
}
+static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_fc *fc)
+{
+ mlx5_fc_destroy(mdev, fc->drop);
+ mlx5_fc_destroy(mdev, fc->cnt);
+ kfree(fc);
+}
+
static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
struct mlx5_core_dev *mdev = ipsec->mdev;
- struct mlx5e_ipsec_tx *tx = ipsec->tx;
- mlx5_fc_destroy(mdev, tx->fc->drop);
- mlx5_fc_destroy(mdev, tx->fc->cnt);
- kfree(tx->fc);
- mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
- mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
- kfree(rx_ipv4->fc);
+ ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
+ ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
+ if (ipsec->is_uplink_rep) {
+ ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
+ ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
+ }
}
-static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
{
- struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
- struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
- struct mlx5_core_dev *mdev = ipsec->mdev;
- struct mlx5e_ipsec_tx *tx = ipsec->tx;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fc *counter;
int err;
- fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
+ fc = kzalloc(sizeof(*fc), GFP_KERNEL);
if (!fc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- /* Both IPv4 and IPv6 point to same flow counters struct. */
- rx_ipv4->fc = fc;
- rx_ipv6->fc = fc;
counter = mlx5_fc_create(mdev, false);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
- goto err_rx_cnt;
+ goto err_cnt;
}
-
fc->cnt = counter;
+
counter = mlx5_fc_create(mdev, false);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
- goto err_rx_drop;
+ goto err_drop;
}
-
fc->drop = counter;
- fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
- if (!fc) {
- err = -ENOMEM;
- goto err_tx_fc;
+
+ return fc;
+
+err_drop:
+ mlx5_fc_destroy(mdev, fc->cnt);
+err_cnt:
+ kfree(fc);
+ return ERR_PTR(err);
+}
+
+static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_fc *fc;
+ int err;
+
+ fc = ipsec_fs_init_single_counter(mdev);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ goto err_rx_cnt;
}
+ ipsec->rx_ipv4->fc = fc;
- tx->fc = fc;
- counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
+ fc = ipsec_fs_init_single_counter(mdev);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
goto err_tx_cnt;
}
+ ipsec->tx->fc = fc;
- fc->cnt = counter;
- counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
- goto err_tx_drop;
+ if (ipsec->is_uplink_rep) {
+ fc = ipsec_fs_init_single_counter(mdev);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ goto err_rx_esw_cnt;
+ }
+ ipsec->rx_esw->fc = fc;
+
+ fc = ipsec_fs_init_single_counter(mdev);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ goto err_tx_esw_cnt;
+ }
+ ipsec->tx_esw->fc = fc;
}
- fc->drop = counter;
+ /* Both IPv4 and IPv6 point to same flow counters struct. */
+ ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
return 0;
-err_tx_drop:
- mlx5_fc_destroy(mdev, tx->fc->cnt);
+err_tx_esw_cnt:
+ ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
+err_rx_esw_cnt:
+ ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
err_tx_cnt:
- kfree(tx->fc);
-err_tx_fc:
- mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
-err_rx_drop:
- mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+ ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
err_rx_cnt:
- kfree(rx_ipv4->fc);
return err;
}
@@ -1458,6 +1675,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
struct mlx5e_ipsec *ipsec = priv->ipsec;
struct mlx5e_ipsec_hw_stats *stats;
struct mlx5e_ipsec_fc *fc;
+ u64 packets, bytes;
stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
@@ -1479,14 +1697,94 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
&stats->ipsec_tx_drop_bytes);
+
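+ /* Fold the uplink representor (eswitch) counters into the same stats. */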
+ if (ipsec->is_uplink_rep) {
+ fc = ipsec->rx_esw->fc;
+ if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
+ stats->ipsec_rx_pkts += packets;
+ stats->ipsec_rx_bytes += bytes;
+ }
+
+ if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
+ stats->ipsec_rx_drop_pkts += packets;
+ stats->ipsec_rx_drop_bytes += bytes;
+ }
+
+ fc = ipsec->tx_esw->fc;
+ if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
+ stats->ipsec_tx_pkts += packets;
+ stats->ipsec_tx_bytes += bytes;
+ }
+
+ if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
+ stats->ipsec_tx_drop_pkts += packets;
+ stats->ipsec_tx_drop_bytes += bytes;
+ }
+ }
+}
+
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int err = 0;
+
+ if (esw)
+ down_write(&esw->mode_lock);
+
+ if (mdev->num_block_ipsec) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ mdev->num_block_tc++;
+
+unlock:
+ if (esw)
+ up_write(&esw->mode_lock);
+
+ return err;
+}
+#else
+static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
+{
+ if (mdev->num_block_ipsec)
+ return -EBUSY;
+
+ mdev->num_block_tc++;
+ return 0;
+}
+#endif
+
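+/* IPsec packet offload and TC offload are mutually exclusive; num_block_tc stays elevated while packet offload rules exist. */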
+static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
+{
+ mdev->num_block_tc--;
}
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ int err;
+
+ if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
+ if (err)
+ return err;
+ }
+
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
- return tx_add_rule(sa_entry);
+ err = tx_add_rule(sa_entry);
+ else
+ err = rx_add_rule(sa_entry);
+
+ if (err)
+ goto err_out;
- return rx_add_rule(sa_entry);
+ return 0;
+
+err_out:
+ if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
+ mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
+ return err;
}
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -1499,21 +1797,40 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
if (ipsec_rule->pkt_reformat)
mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
+ if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
+ mlx5e_ipsec_unblock_tc_offload(mdev);
+
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
- tx_ft_put(sa_entry->ipsec);
+ tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
return;
}
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
- rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family);
+ mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
+ int err;
+
+ err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
+ if (err)
+ return err;
+
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
- return tx_add_policy(pol_entry);
+ err = tx_add_policy(pol_entry);
+ else
+ err = rx_add_policy(pol_entry);
+
+ if (err)
+ goto err_out;
- return rx_add_policy(pol_entry);
+ return 0;
+
+err_out:
+ mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
+ return err;
}
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -1523,16 +1840,18 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5_del_flow_rules(ipsec_rule->rule);
+ mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
+
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
- pol_entry->attrs.prio);
+ pol_entry->attrs.prio, pol_entry->attrs.type);
return;
}
if (ipsec_rule->modify_hdr)
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
- tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
+ tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
}
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
@@ -1540,7 +1859,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
if (!ipsec->tx)
return;
- if (mlx5_ipsec_device_caps(ipsec->mdev) & MLX5_IPSEC_CAP_ROCE)
+ if (ipsec->roce)
mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
ipsec_fs_destroy_counters(ipsec);
@@ -1555,12 +1874,24 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
WARN_ON(ipsec->rx_ipv6->ft.refcnt);
kfree(ipsec->rx_ipv6);
+
+ if (ipsec->is_uplink_rep) {
+ xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+
+ mutex_destroy(&ipsec->tx_esw->ft.mutex);
+ WARN_ON(ipsec->tx_esw->ft.refcnt);
+ kfree(ipsec->tx_esw);
+
+ mutex_destroy(&ipsec->rx_esw->ft.mutex);
+ WARN_ON(ipsec->rx_esw->ft.refcnt);
+ kfree(ipsec->rx_esw);
+ }
}
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
- struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_namespace *ns, *ns_esw;
int err = -ENOMEM;
ns = mlx5_get_flow_namespace(ipsec->mdev,
@@ -1568,9 +1899,23 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
if (!ns)
return -EOPNOTSUPP;
+ if (ipsec->is_uplink_rep) {
+ ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns_esw)
+ return -EOPNOTSUPP;
+
+ ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
+ if (!ipsec->tx_esw)
+ return -ENOMEM;
+
+ ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
+ if (!ipsec->rx_esw)
+ goto err_rx_esw;
+ }
+
ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
if (!ipsec->tx)
- return -ENOMEM;
+ goto err_tx;
ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
if (!ipsec->rx_ipv4)
@@ -1589,8 +1934,14 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
mutex_init(&ipsec->rx_ipv6->ft.mutex);
ipsec->tx->ns = ns;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE)
+ if (ipsec->is_uplink_rep) {
+ mutex_init(&ipsec->tx_esw->ft.mutex);
+ mutex_init(&ipsec->rx_esw->ft.mutex);
+ ipsec->tx_esw->ns = ns_esw;
+ xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
+ } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
+ }
return 0;
@@ -1600,6 +1951,10 @@ err_rx_ipv6:
kfree(ipsec->rx_ipv4);
err_rx_ipv4:
kfree(ipsec->tx);
+err_tx:
+ kfree(ipsec->rx_esw);
+err_rx_esw:
+ kfree(ipsec->tx_esw);
return err;
}
@@ -1621,10 +1976,12 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- struct mlx5e_ipsec_rx *rx =
- ipsec_rx(sa_entry->ipsec, sa_entry->attrs.family);
- struct mlx5e_ipsec_tx *tx = sa_entry->ipsec->tx;
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5e_ipsec_rx *rx;
+ struct mlx5e_ipsec_tx *tx;
+ rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
+ tx = ipsec_tx(sa_entry->ipsec, attrs->type);
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
return tx->allow_tunnel_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a3554bde3e07..3245d1c9d539 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -45,8 +45,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
- if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
+ if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
caps |= MLX5_IPSEC_CAP_PRIO;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
@@ -54,6 +55,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
reformat_l3_esp_tunnel_to_l2))
caps |= MLX5_IPSEC_CAP_TUNNEL;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+ reformat_add_esp_transport_over_udp) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ reformat_del_esp_transport_over_udp))
+ caps |= MLX5_IPSEC_CAP_ESPINUDP;
}
if (mlx5_get_roce_state(mdev) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 8d995e304869..51a144246ea6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -37,6 +37,7 @@
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en.h"
+#include "esw/ipsec_fs.h"
enum {
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
@@ -311,9 +312,8 @@ enum {
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
- struct mlx5_cqe64 *cqe)
+ u32 ipsec_meta_data)
{
- u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_ipsec *ipsec = priv->ipsec;
struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -358,3 +358,24 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
+
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ u32 ipsec_obj_id;
+ int err;
+
+ if (!ipsec || !ipsec->is_uplink_rep)
+ return -EINVAL;
+
+ err = mlx5_esw_ipsec_rx_ipsec_obj_id_search(priv, id, &ipsec_obj_id);
+ if (err) {
+ atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ return err;
+ }
+
+ *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
+ MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 1878a70b9031..9ee014a8ad24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,6 +43,7 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
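+/* Bits 0-23 carry the IPsec object id, bits 24-29 the syndrome; see the getters above. */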
+#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
@@ -66,7 +67,8 @@ void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
struct mlx5_wqe_inline_seg *inlseg);
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
- struct mlx5_cqe64 *cqe);
+ u32 ipsec_meta_data);
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata);
static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
return ipsec_st->tailen;
@@ -145,7 +147,7 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
- struct mlx5_cqe64 *cqe)
+ u32 ipsec_meta_data)
{}
static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 592b165530ff..c9c1db971652 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -10,7 +10,6 @@
#include "lib/aso.h"
#include "lib/crypto.h"
#include "en_accel/macsec.h"
-#include "en_accel/macsec_fs.h"
#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
@@ -66,9 +65,7 @@ struct mlx5e_macsec_sa {
ssci_t ssci;
salt_t salt;
- struct rhash_head hash;
- u32 fs_id;
- union mlx5e_macsec_rule *macsec_rule;
+ union mlx5_macsec_rule *macsec_rule;
struct rcu_head rcu_head;
struct mlx5e_macsec_epn_state epn_state;
};
@@ -106,14 +103,6 @@ struct mlx5e_macsec_aso {
u32 pdn;
};
-static const struct rhashtable_params rhash_sci = {
- .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
- .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
- .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
- .automatic_shrinking = true,
- .min_size = 1,
-};
-
struct mlx5e_macsec_device {
const struct net_device *netdev;
struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
@@ -125,20 +114,13 @@ struct mlx5e_macsec_device {
struct mlx5e_macsec {
struct list_head macsec_device_list_head;
int num_of_devices;
- struct mlx5e_macsec_fs *macsec_fs;
struct mutex lock; /* Protects mlx5e_macsec internal contexts */
- /* Tx sci -> fs id mapping handling */
- struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
-
/* Rx fs_id -> rx_sc mapping */
struct xarray sc_xarray;
struct mlx5_core_dev *mdev;
- /* Stats manage */
- struct mlx5e_macsec_stats stats;
-
/* ASO */
struct mlx5e_macsec_aso aso;
@@ -330,36 +312,30 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
struct mlx5e_macsec_sa *sa,
- bool is_tx)
+ bool is_tx, struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
- if ((is_tx) && sa->fs_id) {
- /* Make sure ongoing datapath readers sees a valid SA */
- rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
- sa->fs_id = 0;
- }
-
if (!sa->macsec_rule)
return;
- mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
+ mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
+ fs_id);
mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
- bool encrypt,
- bool is_tx)
+ bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5e_macsec_rule *macsec_rule;
+ union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -387,7 +363,7 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
- macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
+ macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
if (!macsec_rule) {
err = -ENOMEM;
goto destroy_macsec_object;
@@ -395,16 +371,8 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
sa->macsec_rule = macsec_rule;
- if (is_tx) {
- err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
- if (err)
- goto destroy_macsec_object_and_rule;
- }
-
return 0;
-destroy_macsec_object_and_rule:
- mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
destroy_macsec_object:
mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
@@ -426,7 +394,7 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
static int macsec_rx_sa_active_update(struct macsec_context *ctx,
struct mlx5e_macsec_sa *rx_sa,
- bool active)
+ bool active, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
@@ -437,11 +405,11 @@ static int macsec_rx_sa_active_update(struct macsec_context *ctx,
rx_sa->active = active;
if (!active) {
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id);
return 0;
}
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id);
if (err)
rx_sa->active = false;
@@ -563,7 +531,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
!tx_sa->active)
goto out;
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto destroy_encryption_key;
@@ -627,7 +595,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -636,7 +604,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -669,7 +637,7 @@ static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
kfree_rcu_mightsleep(tx_sa);
macsec_device->tx_sa[assoc_num] = NULL;
@@ -680,20 +648,6 @@ out:
return err;
}
-static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
-{
- struct mlx5e_macsec_sa *macsec_sa;
- u32 fs_id = 0;
-
- rcu_read_lock();
- macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
- if (macsec_sa)
- fs_id = macsec_sa->fs_id;
- rcu_read_unlock();
-
- return fs_id;
-}
-
static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
{
struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
@@ -813,7 +767,8 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
if (!rx_sa)
continue;
- err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -824,7 +779,8 @@ out:
return err;
}
-static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
+static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc,
+ struct net_device *netdev)
{
struct mlx5e_macsec_sa *rx_sa;
int i;
@@ -834,7 +790,8 @@ static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec
if (!rx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
@@ -882,7 +839,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
goto out;
}
- macsec_del_rxsc_ctx(macsec, rx_sc);
+ macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
out:
mutex_unlock(&macsec->lock);
@@ -941,7 +898,6 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
rx_sa->next_pn = ctx_rx_sa->next_pn;
rx_sa->sci = sci;
rx_sa->assoc_num = assoc_num;
- rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
if (ctx->secy->xpn)
update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
@@ -958,7 +914,7 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
goto out;
//TODO - add support for both authentication and encryption flows
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id);
if (err)
goto destroy_encryption_key;
@@ -1025,7 +981,8 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
goto out;
}
- err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active,
+ &rx_sc->sc_xarray_element->fs_id);
out:
mutex_unlock(&macsec->lock);
@@ -1073,7 +1030,8 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1154,7 +1112,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1165,7 +1124,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1218,7 +1178,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1227,7 +1187,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
@@ -1265,7 +1225,7 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
kfree(tx_sa);
macsec_device->tx_sa[i] = NULL;
@@ -1273,7 +1233,7 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
list = &macsec_device->macsec_rx_sc_list_head;
list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
- macsec_del_rxsc_ctx(macsec, rx_sc);
+ macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
kfree(macsec_device->dev_addr);
macsec_device->dev_addr = NULL;
@@ -1647,50 +1607,6 @@ static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_c
mlx5_core_dealloc_pd(mdev, aso->pdn);
}
-bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
-{
- if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
- return false;
-
- if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
- !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
- return false;
-
- if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
- !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
- return false;
-
- if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
- !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
- return false;
-
- if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
- !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
- return false;
-
- return true;
-}
-
-void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
-{
- mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
-}
-
-struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
-{
- if (!macsec)
- return NULL;
-
- return &macsec->stats;
-}
-
static const struct macsec_ops macsec_offload_ops = {
.mdo_add_txsa = mlx5e_macsec_add_txsa,
.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
@@ -1711,7 +1627,8 @@ bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb
struct metadata_dst *md_dst = skb_metadata_dst(skb);
u32 fs_id;
- fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
+ &md_dst->u.macsec_info.sci);
if (!fs_id)
goto err_out;
@@ -1729,7 +1646,8 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
struct metadata_dst *md_dst = skb_metadata_dst(skb);
u32 fs_id;
- fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
+ &md_dst->u.macsec_info.sci);
if (!fs_id)
return;
@@ -1782,7 +1700,7 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_macsec *macsec = NULL;
- struct mlx5e_macsec_fs *macsec_fs;
+ struct mlx5_macsec_fs *macsec_fs;
int err;
if (!mlx5e_is_macsec_device(priv->mdev)) {
@@ -1797,13 +1715,6 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
INIT_LIST_HEAD(&macsec->macsec_device_list_head);
mutex_init(&macsec->lock);
- err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
- if (err) {
- mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
- err);
- goto err_hash;
- }
-
err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
if (err) {
mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
@@ -1822,13 +1733,13 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
macsec->mdev = mdev;
- macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
+ macsec_fs = mlx5_macsec_fs_init(mdev);
if (!macsec_fs) {
err = -ENOMEM;
goto err_out;
}
- macsec->macsec_fs = macsec_fs;
+ mdev->macsec_fs = macsec_fs;
macsec->nb.notifier_call = macsec_obj_change_event;
mlx5_notifier_register(mdev, &macsec->nb);
@@ -1842,8 +1753,6 @@ err_out:
err_wq:
mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
- rhashtable_destroy(&macsec->sci_hash);
-err_hash:
kfree(macsec);
priv->macsec = NULL;
return err;
@@ -1858,10 +1767,9 @@ void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5_notifier_unregister(mdev, &macsec->nb);
- mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
+ mlx5_macsec_fs_cleanup(mdev->macsec_fs);
destroy_workqueue(macsec->wq);
mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
- rhashtable_destroy(&macsec->sci_hash);
mutex_destroy(&macsec->lock);
kfree(macsec);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
index 347380a2cd9c..27df72e23106 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -4,32 +4,16 @@
#ifndef __MLX5_EN_ACCEL_MACSEC_H__
#define __MLX5_EN_ACCEL_MACSEC_H__
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
#include <linux/mlx5/driver.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
-
-/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */
-#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
-#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
-#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
-#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
+#include "lib/macsec_fs.h"
struct mlx5e_priv;
struct mlx5e_macsec;
-struct mlx5e_macsec_stats {
- u64 macsec_rx_pkts;
- u64 macsec_rx_bytes;
- u64 macsec_rx_pkts_drop;
- u64 macsec_rx_bytes_drop;
- u64 macsec_tx_pkts;
- u64 macsec_tx_bytes;
- u64 macsec_tx_pkts_drop;
- u64 macsec_tx_bytes_drop;
-};
-
void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
int mlx5e_macsec_init(struct mlx5e_priv *priv);
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
@@ -52,9 +36,6 @@ static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
-bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
-void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
-struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);
#else
@@ -67,7 +48,6 @@ static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{}
-static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
-#endif /* CONFIG_MLX5_EN_MACSEC */
+#endif /* CONFIG_MLX5_MACSEC */
#endif /* __MLX5_ACCEL_EN_MACSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
deleted file mode 100644
index 414e28584881..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
+++ /dev/null
@@ -1,1394 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#include <net/macsec.h>
-#include <linux/netdevice.h>
-#include <linux/mlx5/qp.h>
-#include <linux/if_vlan.h>
-#include "fs_core.h"
-#include "en/fs.h"
-#include "en_accel/macsec_fs.h"
-#include "mlx5_core.h"
-
-/* MACsec TX flow steering */
-#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
-#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
-
-#define TX_CRYPTO_TABLE_LEVEL 0
-#define TX_CRYPTO_TABLE_NUM_GROUPS 3
-#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
-#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
- (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
- CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
-#define TX_CHECK_TABLE_LEVEL 1
-#define TX_CHECK_TABLE_NUM_FTE 2
-#define RX_CRYPTO_TABLE_LEVEL 0
-#define RX_CHECK_TABLE_LEVEL 1
-#define RX_CHECK_TABLE_NUM_FTE 3
-#define RX_CRYPTO_TABLE_NUM_GROUPS 3
-#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
- ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
-#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
- (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
-#define RX_NUM_OF_RULES_PER_SA 2
-
-#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
-#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
-#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
-#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
-#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
-#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
-#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
-
-/* MACsec RX flow steering */
-#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
-
-struct mlx5_sectag_header {
- __be16 ethertype;
- u8 tci_an;
- u8 sl;
- u32 pn;
- u8 sci[MACSEC_SCI_LEN]; /* optional */
-} __packed;
-
-struct mlx5e_macsec_tx_rule {
- struct mlx5_flow_handle *rule;
- struct mlx5_pkt_reformat *pkt_reformat;
- u32 fs_id;
-};
-
-struct mlx5e_macsec_tables {
- struct mlx5e_flow_table ft_crypto;
- struct mlx5_flow_handle *crypto_miss_rule;
-
- struct mlx5_flow_table *ft_check;
- struct mlx5_flow_group *ft_check_group;
- struct mlx5_fc *check_miss_rule_counter;
- struct mlx5_flow_handle *check_miss_rule;
- struct mlx5_fc *check_rule_counter;
-
- u32 refcnt;
-};
-
-struct mlx5e_macsec_tx {
- struct mlx5_flow_handle *crypto_mke_rule;
- struct mlx5_flow_handle *check_rule;
-
- struct ida tx_halloc;
-
- struct mlx5e_macsec_tables tables;
-};
-
-struct mlx5e_macsec_rx_rule {
- struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
- struct mlx5_modify_hdr *meta_modhdr;
-};
-
-struct mlx5e_macsec_rx {
- struct mlx5_flow_handle *check_rule[2];
- struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
-
- struct mlx5e_macsec_tables tables;
-};
-
-union mlx5e_macsec_rule {
- struct mlx5e_macsec_tx_rule tx_rule;
- struct mlx5e_macsec_rx_rule rx_rule;
-};
-
-struct mlx5e_macsec_fs {
- struct mlx5_core_dev *mdev;
- struct net_device *netdev;
- struct mlx5e_macsec_tx *tx_fs;
- struct mlx5e_macsec_rx *rx_fs;
-};
-
-static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct mlx5e_macsec_tables *tx_tables;
-
- tx_tables = &tx_fs->tables;
-
- /* Tx check table */
- if (tx_fs->check_rule) {
- mlx5_del_flow_rules(tx_fs->check_rule);
- tx_fs->check_rule = NULL;
- }
-
- if (tx_tables->check_miss_rule) {
- mlx5_del_flow_rules(tx_tables->check_miss_rule);
- tx_tables->check_miss_rule = NULL;
- }
-
- if (tx_tables->ft_check_group) {
- mlx5_destroy_flow_group(tx_tables->ft_check_group);
- tx_tables->ft_check_group = NULL;
- }
-
- if (tx_tables->ft_check) {
- mlx5_destroy_flow_table(tx_tables->ft_check);
- tx_tables->ft_check = NULL;
- }
-
- /* Tx crypto table */
- if (tx_fs->crypto_mke_rule) {
- mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
- tx_fs->crypto_mke_rule = NULL;
- }
-
- if (tx_tables->crypto_miss_rule) {
- mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
- tx_tables->crypto_miss_rule = NULL;
- }
-
- mlx5e_destroy_flow_table(&tx_tables->ft_crypto);
-}
-
-static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
- int ix = 0;
- u32 *in;
- int err;
- u8 *mc;
-
- ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g)
- return -ENOMEM;
- in = kvzalloc(inlen, GFP_KERNEL);
-
- if (!in) {
- kfree(ft->g);
- ft->g = NULL;
- return -ENOMEM;
- }
-
- mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
- /* Flow Group for MKE match */
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow Group for SA rules */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
- MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_MACSEC_MASK);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow Group for l2 traps */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- kvfree(in);
- return 0;
-
-err:
- err = PTR_ERR(ft->g[ft->num_groups]);
- ft->g[ft->num_groups] = NULL;
- kvfree(in);
-
- return err;
-}
-
-static struct mlx5_flow_table
- *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
- int level, int max_fte)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_table *fdb = NULL;
-
- /* reserve entry for the match all miss group and rule */
- ft_attr.autogroup.num_reserved_entries = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft_attr.prio = 0;
- ft_attr.flags = flags;
- ft_attr.level = level;
- ft_attr.max_fte = max_fte;
-
- fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
-
- return fdb;
-}
-
-static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *tx_tables;
- struct mlx5_flow_act flow_act = {};
- struct mlx5e_flow_table *ft_crypto;
- struct mlx5_flow_table *flow_table;
- struct mlx5_flow_group *flow_group;
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- u32 *flow_group_in;
- int err;
-
- ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
- if (!ns)
- return -ENOMEM;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in) {
- err = -ENOMEM;
- goto out_spec;
- }
-
- tx_tables = &tx_fs->tables;
- ft_crypto = &tx_tables->ft_crypto;
-
- /* Tx crypto table */
- ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
- ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
-
- flow_table = mlx5_create_flow_table(ns, &ft_attr);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
- goto out_flow_group;
- }
- ft_crypto->t = flow_table;
-
- /* Tx crypto table groups */
- err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
- if (err) {
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
- err);
- goto err;
- }
-
- /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-
- rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
- goto err;
- }
- tx_fs->crypto_mke_rule = rule;
-
- /* Tx crypto table Default miss rule */
- memset(&flow_act, 0, sizeof(flow_act));
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
- rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
- goto err;
- }
- tx_tables->crypto_miss_rule = rule;
-
- /* Tx check table */
- flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
- TX_CHECK_TABLE_NUM_FTE);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err);
- goto err;
- }
- tx_tables->ft_check = flow_table;
-
- /* Tx check table Default miss group/rule */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
- flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
- if (IS_ERR(flow_group)) {
- err = PTR_ERR(flow_group);
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
- err);
- goto err;
- }
- tx_tables->ft_check_group = flow_group;
-
- /* Tx check table default drop rule */
- memset(&dest, 0, sizeof(struct mlx5_flow_destination));
- memset(&flow_act, 0, sizeof(flow_act));
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
- goto err;
- }
- tx_tables->check_miss_rule = rule;
-
- /* Tx check table rule */
- memset(spec, 0, sizeof(struct mlx5_flow_spec));
- memset(&dest, 0, sizeof(struct mlx5_flow_destination));
- memset(&flow_act, 0, sizeof(flow_act));
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
-
- flow_act.flags = FLOW_ACT_NO_APPEND;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
- rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err);
- goto err;
- }
- tx_fs->check_rule = rule;
-
- goto out_flow_group;
-
-err:
- macsec_fs_tx_destroy(macsec_fs);
-out_flow_group:
- kvfree(flow_group_in);
-out_spec:
- kvfree(spec);
- return err;
-}
-
-static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct mlx5e_macsec_tables *tx_tables;
- int err = 0;
-
- tx_tables = &tx_fs->tables;
- if (tx_tables->refcnt)
- goto out;
-
- err = macsec_fs_tx_create(macsec_fs);
- if (err)
- return err;
-
-out:
- tx_tables->refcnt++;
- return err;
-}
-
-static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
-
- if (--tx_tables->refcnt)
- return;
-
- macsec_fs_tx_destroy(macsec_fs);
-}
-
-static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- u32 macsec_obj_id,
- u32 *fs_id)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- int err = 0;
- u32 id;
-
- err = ida_alloc_range(&tx_fs->tx_halloc, 1,
- MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
- GFP_KERNEL);
- if (err < 0)
- return err;
-
- id = err;
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
-
- /* Metadata match */
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_MACSEC_MASK);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_MACSEC | id << 2);
-
- *fs_id = id;
- flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
- flow_act->crypto.obj_id = macsec_obj_id;
-
- mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
- return 0;
-}
-
-static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
- char *reformatbf,
- size_t *reformat_size)
-{
- const struct macsec_secy *secy = ctx->secy;
- bool sci_present = macsec_send_sci(secy);
- struct mlx5_sectag_header sectag = {};
- const struct macsec_tx_sc *tx_sc;
-
- tx_sc = &secy->tx_sc;
- sectag.ethertype = htons(ETH_P_MACSEC);
-
- if (sci_present) {
- sectag.tci_an |= MACSEC_TCI_SC;
- memcpy(&sectag.sci, &secy->sci,
- sizeof(sectag.sci));
- } else {
- if (tx_sc->end_station)
- sectag.tci_an |= MACSEC_TCI_ES;
- if (tx_sc->scb)
- sectag.tci_an |= MACSEC_TCI_SCB;
- }
-
- /* With GCM, C/E clear for !encrypt, both set for encrypt */
- if (tx_sc->encrypt)
- sectag.tci_an |= MACSEC_TCI_CONFID;
- else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
- sectag.tci_an |= MACSEC_TCI_C;
-
- sectag.tci_an |= tx_sc->encoding_sa;
-
- *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
-
- memcpy(reformatbf, &sectag, *reformat_size);
-}
-
-static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5e_macsec_tx_rule *tx_rule)
-{
- if (tx_rule->rule) {
- mlx5_del_flow_rules(tx_rule->rule);
- tx_rule->rule = NULL;
- }
-
- if (tx_rule->pkt_reformat) {
- mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
- tx_rule->pkt_reformat = NULL;
- }
-
- if (tx_rule->fs_id) {
- ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
- tx_rule->fs_id = 0;
- }
-
- kfree(tx_rule);
-
- macsec_fs_tx_ft_put(macsec_fs);
-}
-
-#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1
-
-static union mlx5e_macsec_rule *
-macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- const struct macsec_context *macsec_ctx,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 *sa_fs_id)
-{
- char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
- struct mlx5_pkt_reformat_params reformat_params = {};
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- union mlx5e_macsec_rule *macsec_rule = NULL;
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *tx_tables;
- struct mlx5e_macsec_tx_rule *tx_rule;
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- size_t reformat_size;
- int err = 0;
- u32 fs_id;
-
- tx_tables = &tx_fs->tables;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return NULL;
-
- err = macsec_fs_tx_ft_get(macsec_fs);
- if (err)
- goto out_spec;
-
- macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
- if (!macsec_rule) {
- macsec_fs_tx_ft_put(macsec_fs);
- goto out_spec;
- }
-
- tx_rule = &macsec_rule->tx_rule;
-
- /* Tx crypto table crypto rule */
- macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
-
- reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
- reformat_params.size = reformat_size;
- reformat_params.data = reformatbf;
-
- if (is_vlan_dev(macsec_ctx->netdev))
- reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES;
-
- flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
- if (IS_ERR(flow_act.pkt_reformat)) {
- err = PTR_ERR(flow_act.pkt_reformat);
- netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
- goto err;
- }
- tx_rule->pkt_reformat = flow_act.pkt_reformat;
-
- err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id);
- if (err) {
- netdev_err(netdev,
- "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
- err);
- goto err;
- }
-
- tx_rule->fs_id = fs_id;
- *sa_fs_id = fs_id;
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = tx_tables->ft_check;
- rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
- goto err;
- }
- tx_rule->rule = rule;
-
- goto out_spec;
-
-err:
- macsec_fs_tx_del_rule(macsec_fs, tx_rule);
- macsec_rule = NULL;
-out_spec:
- kvfree(spec);
-
- return macsec_rule;
-}
-
-static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *tx_tables;
-
- if (!tx_fs)
- return;
-
- tx_tables = &tx_fs->tables;
- if (tx_tables->refcnt) {
- netdev_err(macsec_fs->netdev,
- "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
- tx_tables->refcnt);
- return;
- }
-
- ida_destroy(&tx_fs->tx_halloc);
-
- if (tx_tables->check_miss_rule_counter) {
- mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
- tx_tables->check_miss_rule_counter = NULL;
- }
-
- if (tx_tables->check_rule_counter) {
- mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
- tx_tables->check_rule_counter = NULL;
- }
-
- kfree(tx_fs);
- macsec_fs->tx_fs = NULL;
-}
-
-static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *tx_tables;
- struct mlx5e_macsec_tx *tx_fs;
- struct mlx5_fc *flow_counter;
- int err;
-
- tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
- if (!tx_fs)
- return -ENOMEM;
-
- tx_tables = &tx_fs->tables;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
- err);
- goto err_encrypt_counter;
- }
- tx_tables->check_rule_counter = flow_counter;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Tx drop flow counter, err(%d)\n",
- err);
- goto err_drop_counter;
- }
- tx_tables->check_miss_rule_counter = flow_counter;
-
- ida_init(&tx_fs->tx_halloc);
-
- macsec_fs->tx_fs = tx_fs;
-
- return 0;
-
-err_drop_counter:
- mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
- tx_tables->check_rule_counter = NULL;
-
-err_encrypt_counter:
- kfree(tx_fs);
- macsec_fs->tx_fs = NULL;
-
- return err;
-}
-
-static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct mlx5e_macsec_tables *rx_tables;
- int i;
-
- /* Rx check table */
- for (i = 1; i >= 0; --i) {
- if (rx_fs->check_rule[i]) {
- mlx5_del_flow_rules(rx_fs->check_rule[i]);
- rx_fs->check_rule[i] = NULL;
- }
-
- if (rx_fs->check_rule_pkt_reformat[i]) {
- mlx5_packet_reformat_dealloc(macsec_fs->mdev,
- rx_fs->check_rule_pkt_reformat[i]);
- rx_fs->check_rule_pkt_reformat[i] = NULL;
- }
- }
-
- rx_tables = &rx_fs->tables;
-
- if (rx_tables->check_miss_rule) {
- mlx5_del_flow_rules(rx_tables->check_miss_rule);
- rx_tables->check_miss_rule = NULL;
- }
-
- if (rx_tables->ft_check_group) {
- mlx5_destroy_flow_group(rx_tables->ft_check_group);
- rx_tables->ft_check_group = NULL;
- }
-
- if (rx_tables->ft_check) {
- mlx5_destroy_flow_table(rx_tables->ft_check);
- rx_tables->ft_check = NULL;
- }
-
- /* Rx crypto table */
- if (rx_tables->crypto_miss_rule) {
- mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
- rx_tables->crypto_miss_rule = NULL;
- }
-
- mlx5e_destroy_flow_table(&rx_tables->ft_crypto);
-}
-
-static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
- int ix = 0;
- u32 *in;
- int err;
- u8 *mc;
-
- ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g)
- return -ENOMEM;
-
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- kfree(ft->g);
- return -ENOMEM;
- }
-
- mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
- /* Flow group for SA rule with SCI */
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS_5);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
- MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
- MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow group for SA rule without SCI */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS_5);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- /* Flow Group for l2 traps */
- memset(in, 0, inlen);
- memset(mc, 0, mclen);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
- ft->num_groups++;
-
- kvfree(in);
- return 0;
-
-err:
- err = PTR_ERR(ft->g[ft->num_groups]);
- ft->g[ft->num_groups] = NULL;
- kvfree(in);
-
- return err;
-}
-
-static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5_flow_destination *dest,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_spec *spec,
- int reformat_param_size)
-{
- int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
- u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
- struct mlx5_pkt_reformat_params reformat_params = {};
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5_flow_handle *rule;
- int err = 0;
-
- rx_tables = &rx_fs->tables;
-
- /* Rx check table decap 16B rule */
- memset(dest, 0, sizeof(*dest));
- memset(flow_act, 0, sizeof(*flow_act));
- memset(spec, 0, sizeof(*spec));
-
- reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
- reformat_params.size = reformat_param_size;
- reformat_params.data = mlx5_reformat_buf;
- flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
- if (IS_ERR(flow_act->pkt_reformat)) {
- err = PTR_ERR(flow_act->pkt_reformat);
- netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
- return err;
- }
- rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
-
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- /* MACsec syndrome match */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
- /* ASO return reg syndrome match */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
-
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
- /* Sectag TCI SC present bit*/
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- flow_act->flags = FLOW_ACT_NO_APPEND;
- flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
- rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
- return err;
- }
-
- rx_fs->check_rule[rule_index] = rule;
-
- return 0;
-}
-
-static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5e_flow_table *ft_crypto;
- struct mlx5_flow_table *flow_table;
- struct mlx5_flow_group *flow_group;
- struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- u32 *flow_group_in;
- int err;
-
- ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
- if (!ns)
- return -ENOMEM;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return -ENOMEM;
-
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in) {
- err = -ENOMEM;
- goto free_spec;
- }
-
- rx_tables = &rx_fs->tables;
- ft_crypto = &rx_tables->ft_crypto;
-
- /* Rx crypto table */
- ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
- ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
-
- flow_table = mlx5_create_flow_table(ns, &ft_attr);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
- goto out_flow_group;
- }
- ft_crypto->t = flow_table;
-
- /* Rx crypto table groups */
- err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
- if (err) {
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
- err);
- goto err;
- }
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
- rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev,
- "Failed to add MACsec Rx crypto table default miss rule %d\n",
- err);
- goto err;
- }
- rx_tables->crypto_miss_rule = rule;
-
- /* Rx check table */
- flow_table = macsec_fs_auto_group_table_create(ns,
- MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
- RX_CHECK_TABLE_LEVEL,
- RX_CHECK_TABLE_NUM_FTE);
- if (IS_ERR(flow_table)) {
- err = PTR_ERR(flow_table);
- netdev_err(netdev, "fail to create MACsec RX check table, err(%d)\n", err);
- goto err;
- }
- rx_tables->ft_check = flow_table;
-
- /* Rx check table Default miss group/rule */
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
- flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
- if (IS_ERR(flow_group)) {
- err = PTR_ERR(flow_group);
- netdev_err(netdev,
- "Failed to create default flow group for MACsec Rx check table err(%d)\n",
- err);
- goto err;
- }
- rx_tables->ft_check_group = flow_group;
-
- /* Rx check table default drop rule */
- memset(&flow_act, 0, sizeof(flow_act));
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
- rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
-		netdev_err(netdev, "Failed to add MACsec Rx check drop rule, err(%d)\n", err);
- goto err;
- }
- rx_tables->check_miss_rule = rule;
-
- /* Rx check table decap rules */
- err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
- MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
- if (err)
- goto err;
-
- err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
- MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
- if (err)
- goto err;
-
- goto out_flow_group;
-
-err:
- macsec_fs_rx_destroy(macsec_fs);
-out_flow_group:
- kvfree(flow_group_in);
-free_spec:
- kvfree(spec);
- return err;
-}
-
-static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
- int err = 0;
-
- if (rx_tables->refcnt)
- goto out;
-
- err = macsec_fs_rx_create(macsec_fs);
- if (err)
- return err;
-
-out:
- rx_tables->refcnt++;
- return err;
-}
-
-static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
-
- if (--rx_tables->refcnt)
- return;
-
- macsec_fs_rx_destroy(macsec_fs);
-}
-
-static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5e_macsec_rx_rule *rx_rule)
-{
- int i;
-
- for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
- if (rx_rule->rule[i]) {
- mlx5_del_flow_rules(rx_rule->rule[i]);
- rx_rule->rule[i] = NULL;
- }
- }
-
- if (rx_rule->meta_modhdr) {
- mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
- rx_rule->meta_modhdr = NULL;
- }
-
- kfree(rx_rule);
-
- macsec_fs_rx_ft_put(macsec_fs);
-}
-
-static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- struct mlx5_macsec_rule_attrs *attrs,
- bool sci_present)
-{
- u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
- struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
- __be32 *sci_p = (__be32 *)(&attrs->sci);
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-
- /* MACsec ethertype */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
-
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
-
- /* Sectag AN + TCI SC present bit*/
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
- MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
- tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
-
- if (sci_present) {
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_5.macsec_tag_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
- be32_to_cpu(sci_p[0]));
-
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- misc_parameters_5.macsec_tag_3);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
- be32_to_cpu(sci_p[1]));
- } else {
- /* When SCI isn't present in the Sectag, need to match the source */
- /* MAC address only if the SCI contains the default MACsec PORT */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
- sci_p, ETH_ALEN);
- }
-
- crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
- crypto_params->obj_id = attrs->macsec_obj_id;
-}
-
-static union mlx5e_macsec_rule *
-macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 fs_id)
-{
- u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct net_device *netdev = macsec_fs->netdev;
- union mlx5e_macsec_rule *macsec_rule = NULL;
- struct mlx5_modify_hdr *modify_hdr = NULL;
- struct mlx5_flow_destination dest = {};
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5e_macsec_rx_rule *rx_rule;
- struct mlx5_flow_act flow_act = {};
- struct mlx5e_flow_table *ft_crypto;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_spec *spec;
- int err = 0;
-
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- return NULL;
-
- err = macsec_fs_rx_ft_get(macsec_fs);
- if (err)
- goto out_spec;
-
- macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
- if (!macsec_rule) {
- macsec_fs_rx_ft_put(macsec_fs);
- goto out_spec;
- }
-
- rx_rule = &macsec_rule->rx_rule;
- rx_tables = &rx_fs->tables;
- ft_crypto = &rx_tables->ft_crypto;
-
- /* Set bit[31 - 30] macsec marker - 0x01 */
- /* Set bit[15-0] fs id */
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
-
- modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
- 1, action);
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
- netdev_err(netdev, "fail to alloc MACsec set modify_header_id err=%d\n", err);
- modify_hdr = NULL;
- goto err;
- }
- rx_rule->meta_modhdr = modify_hdr;
-
- /* Rx crypto table with SCI rule */
- macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
-
- flow_act.modify_hdr = modify_hdr;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx_tables->ft_check;
- rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev,
- "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
- err);
- goto err;
- }
- rx_rule->rule[0] = rule;
-
- /* Rx crypto table without SCI rule */
- if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
- memset(spec, 0, sizeof(struct mlx5_flow_spec));
- memset(&dest, 0, sizeof(struct mlx5_flow_destination));
- memset(&flow_act, 0, sizeof(flow_act));
-
- macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
-
- flow_act.modify_hdr = modify_hdr;
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx_tables->ft_check;
- rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- netdev_err(netdev,
- "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
- err);
- goto err;
- }
- rx_rule->rule[1] = rule;
- }
-
- kvfree(spec);
- return macsec_rule;
-
-err:
- macsec_fs_rx_del_rule(macsec_fs, rx_rule);
- macsec_rule = NULL;
-out_spec:
- kvfree(spec);
- return macsec_rule;
-}
-
-static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct net_device *netdev = macsec_fs->netdev;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *rx_tables;
- struct mlx5e_macsec_rx *rx_fs;
- struct mlx5_fc *flow_counter;
- int err;
-
- rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
- if (!rx_fs)
- return -ENOMEM;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
- err);
- goto err_encrypt_counter;
- }
-
- rx_tables = &rx_fs->tables;
- rx_tables->check_rule_counter = flow_counter;
-
- flow_counter = mlx5_fc_create(mdev, false);
- if (IS_ERR(flow_counter)) {
- err = PTR_ERR(flow_counter);
- netdev_err(netdev,
- "Failed to create MACsec Rx drop flow counter, err(%d)\n",
- err);
- goto err_drop_counter;
- }
- rx_tables->check_miss_rule_counter = flow_counter;
-
- macsec_fs->rx_fs = rx_fs;
-
- return 0;
-
-err_drop_counter:
- mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
- rx_tables->check_rule_counter = NULL;
-
-err_encrypt_counter:
- kfree(rx_fs);
- macsec_fs->rx_fs = NULL;
-
- return err;
-}
-
-static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
- struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
- struct mlx5e_macsec_tables *rx_tables;
-
- if (!rx_fs)
- return;
-
- rx_tables = &rx_fs->tables;
-
- if (rx_tables->refcnt) {
- netdev_err(macsec_fs->netdev,
- "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
- rx_tables->refcnt);
- return;
- }
-
- if (rx_tables->check_miss_rule_counter) {
- mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
- rx_tables->check_miss_rule_counter = NULL;
- }
-
- if (rx_tables->check_rule_counter) {
- mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
- rx_tables->check_rule_counter = NULL;
- }
-
- kfree(rx_fs);
- macsec_fs->rx_fs = NULL;
-}
-
-void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats)
-{
- struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats;
- struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
- struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
- struct mlx5_core_dev *mdev = macsec_fs->mdev;
-
- if (tx_tables->check_rule_counter)
- mlx5_fc_query(mdev, tx_tables->check_rule_counter,
- &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
-
- if (tx_tables->check_miss_rule_counter)
- mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
- &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
-
- if (rx_tables->check_rule_counter)
- mlx5_fc_query(mdev, rx_tables->check_rule_counter,
- &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
-
- if (rx_tables->check_miss_rule_counter)
- mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
- &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
-}
-
-union mlx5e_macsec_rule *
-mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- const struct macsec_context *macsec_ctx,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 *sa_fs_id)
-{
- return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
- macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
- macsec_fs_rx_add_rule(macsec_fs, attrs, *sa_fs_id);
-}
-
-void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- union mlx5e_macsec_rule *macsec_rule,
- int action)
-{
- (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
- macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) :
- macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule);
-}
-
-void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs)
-{
- macsec_fs_rx_cleanup(macsec_fs);
- macsec_fs_tx_cleanup(macsec_fs);
- kfree(macsec_fs);
-}
-
-struct mlx5e_macsec_fs *
-mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev)
-{
- struct mlx5e_macsec_fs *macsec_fs;
- int err;
-
- macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
- if (!macsec_fs)
- return NULL;
-
- macsec_fs->mdev = mdev;
- macsec_fs->netdev = netdev;
-
- err = macsec_fs_tx_init(macsec_fs);
- if (err) {
- netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
- goto err;
- }
-
- err = macsec_fs_rx_init(macsec_fs);
- if (err) {
-		netdev_err(netdev, "MACsec offload: Failed to init rx_fs, err=%d\n", err);
- goto tx_cleanup;
- }
-
- return macsec_fs;
-
-tx_cleanup:
- macsec_fs_tx_cleanup(macsec_fs);
-err:
- kfree(macsec_fs);
- return NULL;
-}
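
The macsec_fs_rx_ft_get()/macsec_fs_rx_ft_put() helpers removed above (and their Tx counterparts) follow a refcounted-singleton pattern: the flow tables are built on the first get and torn down only when the last user drops its reference. The stand-alone C sketch below models that pattern; the names (tables_get/tables_put, the created flag) are illustrative, not driver symbols.

/* Illustrative model of the refcounted get/put pattern above. */
#include <stdio.h>

struct tables {
	unsigned int refcnt;
	int created;			/* stands in for the real flow tables */
};

static int tables_create(struct tables *t)
{
	t->created = 1;			/* real code builds tables/groups/rules */
	return 0;
}

static void tables_destroy(struct tables *t)
{
	t->created = 0;
}

static int tables_get(struct tables *t)
{
	int err;

	if (t->refcnt)			/* already built: just take a reference */
		goto out;

	err = tables_create(t);
	if (err)
		return err;
out:
	t->refcnt++;
	return 0;
}

static void tables_put(struct tables *t)
{
	if (--t->refcnt)		/* still in use by another SA */
		return;

	tables_destroy(t);		/* last user: tear everything down */
}

int main(void)
{
	struct tables t = { 0 };

	tables_get(&t);			/* first SA: builds the tables */
	tables_get(&t);			/* second SA: refcount only */
	tables_put(&t);
	tables_put(&t);			/* last SA: tables destroyed */
	printf("refcnt=%u created=%d\n", t.refcnt, t.created);
	return 0;
}
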
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
deleted file mode 100644
index b429648d4ee7..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#ifndef __MLX5_MACSEC_STEERING_H__
-#define __MLX5_MACSEC_STEERING_H__
-
-#ifdef CONFIG_MLX5_EN_MACSEC
-
-#include "en_accel/macsec.h"
-
-#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
-
-struct mlx5e_macsec_fs;
-union mlx5e_macsec_rule;
-
-struct mlx5_macsec_rule_attrs {
- sci_t sci;
- u32 macsec_obj_id;
- u8 assoc_num;
- int action;
-};
-
-enum mlx5_macsec_action {
- MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
-};
-
-void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs);
-
-struct mlx5e_macsec_fs *
-mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
-
-union mlx5e_macsec_rule *
-mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
- const struct macsec_context *ctx,
- struct mlx5_macsec_rule_attrs *attrs,
- u32 *sa_fs_id);
-
-void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
- union mlx5e_macsec_rule *macsec_rule,
- int action);
-
-void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats);
-
-#endif
-
-#endif /* __MLX5_MACSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
index e50a2e3f3d18..4559ee16a11a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -8,14 +8,14 @@
#include "en_accel/macsec.h"
static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_rx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5_macsec_stats, macsec_tx_bytes_drop) },
};
#define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc)
@@ -52,6 +52,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
{
+ struct mlx5_macsec_fs *macsec_fs;
int i;
if (!priv->macsec)
@@ -60,9 +61,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
if (!mlx5e_is_macsec_device(priv->mdev))
return idx;
- mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec));
+ macsec_fs = priv->mdev->macsec_fs;
+ mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs));
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec),
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs),
mlx5e_macsec_hw_stats_desc,
i);
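
The macsec_stats.c hunk above only swaps the struct named in the descriptor table; the underlying pattern is a name-plus-offset counter descriptor, so one generic loop can both emit counter names and read 64-bit values out of a stats struct. A minimal stand-alone C model of that pattern, with made-up struct and counter names, is:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t tx_pkts;
	uint64_t tx_bytes;
};

struct counter_desc {
	const char *name;
	size_t offset;			/* offset of the counter inside demo_stats */
};

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc demo_desc[] = {
	DECLARE_STAT(struct demo_stats, rx_pkts),
	DECLARE_STAT(struct demo_stats, rx_bytes),
	DECLARE_STAT(struct demo_stats, tx_pkts),
	DECLARE_STAT(struct demo_stats, tx_bytes),
};

static uint64_t read_ctr64(const void *stats, const struct counter_desc *d)
{
	return *(const uint64_t *)((const char *)stats + d->offset);
}

int main(void)
{
	struct demo_stats s = { .rx_pkts = 10, .rx_bytes = 1500, .tx_pkts = 4, .tx_bytes = 600 };
	size_t i;

	for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
		printf("%s: %llu\n", demo_desc[i].name,
		       (unsigned long long)read_ctr64(&s, &demo_desc[i]));
	return 0;
}
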
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 5aa51d74f8b4..bb7f86c993e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -432,8 +432,10 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
}
spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
- if (arfs_rule->rule)
+ if (arfs_rule->rule) {
mlx5_del_flow_rules(arfs_rule->rule);
+ priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
+ }
hlist_del(&arfs_rule->hlist);
kfree(arfs_rule);
}
@@ -509,6 +511,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
+ priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
err = -ENOMEM;
goto out;
}
@@ -519,6 +522,8 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
ntohs(tuple->etype));
arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
if (!arfs_table) {
+ WARN_ONCE(1, "arfs table does not exist for etype %u and ip_proto %u\n",
+ tuple->etype, tuple->ip_proto);
err = -EINVAL;
goto out;
}
@@ -600,9 +605,11 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
err = mlx5_modify_rule_destination(rule, &dst, NULL);
- if (err)
+ if (err) {
+ priv->channel_stats[rxq]->rq.arfs_err++;
netdev_warn(priv->netdev,
"Failed to modify aRFS rule destination to rq=%d\n", rxq);
+ }
}
static void arfs_handle_work(struct work_struct *work)
@@ -632,6 +639,7 @@ static void arfs_handle_work(struct work_struct *work)
if (IS_ERR(rule))
goto out;
arfs_rule->rule = rule;
+ priv->channel_stats[arfs_rule->rxq]->rq.arfs_add++;
} else {
arfs_modify_rule_rq(priv, arfs_rule->rule,
arfs_rule->rxq);
@@ -650,8 +658,10 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
struct arfs_tuple *tuple;
rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
- if (!rule)
+ if (!rule) {
+ priv->channel_stats[rxq]->rq.arfs_err++;
return NULL;
+ }
rule->priv = priv;
rule->rxq = rxq;
@@ -740,10 +750,13 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_lock_bh(&arfs->arfs_lock);
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
- if (arfs_rule->rxq == rxq_index) {
+ if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
+
+ priv->channel_stats[rxq_index]->rq.arfs_request_in++;
+ priv->channel_stats[arfs_rule->rxq]->rq.arfs_request_out++;
arfs_rule->rxq = rxq_index;
} else {
arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 27861b68ced5..dff02434ff45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2061,7 +2061,8 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
struct mlx5e_params new_params;
int err;
- if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) ||
+ !MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
return -EOPNOTSUPP;
/* Don't allow changing the PTP state if HTB offload is active, because
@@ -2163,8 +2164,8 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rule_locs)
+static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2181,7 +2182,7 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index aac32e505c14..3eccdadc0357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -96,10 +96,6 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
- max_tuples = ETHTOOL_NUM_L3_L4_FTS;
- prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &ethtool->l3_l4_ft[prio];
- break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
@@ -900,10 +896,16 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
struct ethtool_rxnfc *nfc)
{
u8 rx_hash_field = 0;
+ u32 flow_type = 0;
+ u32 rss_idx = 0;
int err;
int tt;
- tt = flow_type_to_traffic_type(nfc->flow_type);
+ if (nfc->flow_type & FLOW_RSS)
+ rss_idx = nfc->rss_context;
+
+ flow_type = flow_type_mask(nfc->flow_type);
+ tt = flow_type_to_traffic_type(flow_type);
if (tt < 0)
return tt;
@@ -911,10 +913,10 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
* on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
* port.
*/
- if (nfc->flow_type != TCP_V4_FLOW &&
- nfc->flow_type != TCP_V6_FLOW &&
- nfc->flow_type != UDP_V4_FLOW &&
- nfc->flow_type != UDP_V6_FLOW)
+ if (flow_type != TCP_V4_FLOW &&
+ flow_type != TCP_V6_FLOW &&
+ flow_type != UDP_V4_FLOW &&
+ flow_type != UDP_V6_FLOW)
return -EOPNOTSUPP;
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
@@ -931,7 +933,7 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
+ err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, rss_idx, tt, rx_hash_field);
mutex_unlock(&priv->state_lock);
return err;
@@ -940,14 +942,23 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
struct ethtool_rxnfc *nfc)
{
- u32 hash_field = 0;
+ int hash_field = 0;
+ u32 flow_type = 0;
+ u32 rss_idx = 0;
int tt;
- tt = flow_type_to_traffic_type(nfc->flow_type);
+ if (nfc->flow_type & FLOW_RSS)
+ rss_idx = nfc->rss_context;
+
+ flow_type = flow_type_mask(nfc->flow_type);
+ tt = flow_type_to_traffic_type(flow_type);
if (tt < 0)
return tt;
- hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
+ hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, rss_idx, tt);
+ if (hash_field < 0)
+ return hash_field;
+
nfc->data = 0;
if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
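
The en_fs_ethtool.c hunks fix RSS-context handling: ethtool may OR the FLOW_RSS flag into nfc->flow_type when a request targets a non-default RSS context, so the flow type must be masked before it is compared against TCP_V4_FLOW and friends, and the context index is taken from nfc->rss_context. A stand-alone sketch of that masking (FLOW_RSS and the flow-type values mirror the ethtool UAPI; flow_type_mask() here is a local illustrative helper):

#include <stdio.h>
#include <stdint.h>

#define TCP_V4_FLOW 0x01
#define UDP_V4_FLOW 0x02
#define FLOW_RSS    0x20000000u

static uint32_t flow_type_mask(uint32_t flow_type)
{
	return flow_type & ~FLOW_RSS;		/* strip the RSS-context flag */
}

int main(void)
{
	uint32_t req = TCP_V4_FLOW | FLOW_RSS;	/* request for a non-default context */
	uint32_t rss_idx = 0;

	if (req & FLOW_RSS)
		rss_idx = 3;			/* nfc->rss_context in the driver */

	/* Without the mask, this comparison would wrongly reject the request. */
	if (flow_type_mask(req) == TCP_V4_FLOW)
		printf("TCP/IPv4 hash fields, rss context %u\n", rss_idx);
	return 0;
}
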
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f7b494125eee..a2ae791538ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -38,7 +38,7 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
@@ -1991,7 +1991,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;
- err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn);
+ err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
if (err)
return err;
@@ -2447,14 +2447,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
+ int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
struct net_device *netdev = priv->netdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
unsigned int irq;
int err;
- err = mlx5_vector2irqn(priv->mdev, ix, &irq);
+ err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
if (err)
return err;
@@ -2858,13 +2858,13 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev = priv->mdev;
int num_comp_vectors, ix, irq;
- num_comp_vectors = mlx5_comp_vectors_count(mdev);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
for (ix = 0; ix < params->num_channels; ix++) {
cpumask_clear(priv->scratchpad.cpumask);
for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+ int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
}
@@ -4898,9 +4898,6 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
mode = nla_get_u16(attr);
if (mode > BRIDGE_MODE_VEPA)
return -EINVAL;
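
In en_main.c the XPS masks are still built by striding over the completion vectors: channel ix collects the CPUs of vectors ix, ix+n, ix+2n, ... for n channels; only the vector-to-CPU lookup changed to mlx5_comp_vector_get_cpu(). A tiny stand-alone model of that striding loop (the CPU table is made up):

#include <stdio.h>

#define NUM_VECTORS 8

static const int vector_cpu[NUM_VECTORS] = { 0, 1, 2, 3, 4, 5, 6, 7 };

int main(void)
{
	int num_channels = 3;
	int ix, irq;

	for (ix = 0; ix < num_channels; ix++) {
		printf("channel %d xps cpus:", ix);
		for (irq = ix; irq < NUM_VECTORS; irq += num_channels)
			printf(" %d", vector_cpu[irq]);
		printf("\n");
	}
	return 0;
}
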
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 99b3843396f3..2fdb8895aecd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -399,15 +399,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
}
static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
- struct mlx5_devcom *devcom,
struct mlx5e_rep_sq *rep_sq, int i)
{
- struct mlx5_eswitch *peer_esw = NULL;
struct mlx5_flow_handle *flow_rule;
- int tmp;
+ struct mlx5_devcom_comp_dev *tmp;
+ struct mlx5_eswitch *peer_esw;
- mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
- peer_esw, tmp) {
+ mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
struct mlx5e_rep_sq_peer *sq_peer;
int err;
@@ -443,7 +441,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *flow_rule;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_sq *rep_sq;
- struct mlx5_devcom *devcom;
bool devcom_locked = false;
int err;
int i;
@@ -451,10 +448,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
- devcom = esw->dev->priv.devcom;
rpriv = mlx5e_rep_to_rep_priv(rep);
- if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
- mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+
+ if (mlx5_devcom_comp_is_ready(esw->devcom) &&
+ mlx5_devcom_for_each_peer_begin(esw->devcom))
devcom_locked = true;
for (i = 0; i < sqns_num; i++) {
@@ -477,7 +474,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
xa_init(&rep_sq->sq_peer);
if (devcom_locked) {
- err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+ err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
if (err) {
mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
xa_destroy(&rep_sq->sq_peer);
@@ -490,7 +487,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
}
if (devcom_locked)
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(esw->devcom);
return 0;
@@ -498,7 +495,7 @@ out_err:
mlx5e_sqs2vport_stop(esw, rep);
if (devcom_locked)
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(esw->devcom);
return err;
}
@@ -1339,6 +1336,7 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_hw),
&MLX5E_STATS_GRP(ipsec_sw),
#endif
&MLX5E_STATS_GRP(ptp),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 41d37159e027..3fd11b0761e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,7 +36,7 @@
#include <linux/bitmap.h>
#include <linux/filter.h>
#include <net/ip6_checksum.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
#include <net/gro.h>
#include <net/udp.h>
@@ -1543,7 +1543,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
- mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
+ mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
+ be32_to_cpu(cqe->ft_metadata));
if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 4d77055abd4b..4b96ad657145 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -38,7 +38,7 @@
#include "en/port.h"
#ifdef CONFIG_PAGE_POOL_STATS
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#endif
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
@@ -180,7 +180,13 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+#ifdef CONFIG_MLX5_EN_ARFS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
+#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
@@ -231,7 +237,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
@@ -321,7 +326,6 @@ static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
- s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
}
static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
@@ -354,7 +358,13 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
s->rx_congst_umr += rq_stats->congst_umr;
+#ifdef CONFIG_MLX5_EN_ARFS
+ s->rx_arfs_add += rq_stats->arfs_add;
+ s->rx_arfs_request_in += rq_stats->arfs_request_in;
+ s->rx_arfs_request_out += rq_stats->arfs_request_out;
+ s->rx_arfs_expired += rq_stats->arfs_expired;
s->rx_arfs_err += rq_stats->arfs_err;
+#endif
s->rx_recover += rq_stats->recover;
#ifdef CONFIG_PAGE_POOL_STATS
s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
@@ -1990,7 +2000,13 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+#ifdef CONFIG_MLX5_EN_ARFS
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
+#endif
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
@@ -2092,7 +2108,6 @@ static const struct counter_desc xskrq_stats_desc[] = {
{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
- { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};
static const struct counter_desc xsksq_stats_desc[] = {
@@ -2142,9 +2157,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2170,7 +2183,6 @@ static const struct counter_desc ptp_rq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
- { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};
@@ -2490,7 +2502,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
&MLX5E_STATS_GRP(qos),
-#ifdef CONFIG_MLX5_EN_MACSEC
+#ifdef CONFIG_MLX5_MACSEC
&MLX5E_STATS_GRP(macsec_hw),
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 1ff8a06027dc..176fa5976259 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -194,7 +194,13 @@ struct mlx5e_sw_stats {
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
u64 rx_congst_umr;
+#ifdef CONFIG_MLX5_EN_ARFS
+ u64 rx_arfs_add;
+ u64 rx_arfs_request_in;
+ u64 rx_arfs_request_out;
+ u64 rx_arfs_expired;
u64 rx_arfs_err;
+#endif
u64 rx_recover;
u64 ch_events;
u64 ch_poll;
@@ -256,7 +262,6 @@ struct mlx5e_sw_stats {
u64 rx_xsk_cqe_compress_blks;
u64 rx_xsk_cqe_compress_pkts;
u64 rx_xsk_congst_umr;
- u64 rx_xsk_arfs_err;
u64 tx_xsk_xmit;
u64 tx_xsk_mpwqe;
u64 tx_xsk_inlnw;
@@ -358,7 +363,13 @@ struct mlx5e_rq_stats {
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
u64 congst_umr;
+#ifdef CONFIG_MLX5_EN_ARFS
+ u64 arfs_add;
+ u64 arfs_request_in;
+ u64 arfs_request_out;
+ u64 arfs_expired;
u64 arfs_err;
+#endif
u64 recover;
#ifdef CONFIG_PAGE_POOL_STATS
u64 pp_alloc_fast;
@@ -449,9 +460,7 @@ struct mlx5e_ptp_cq_stats {
u64 err_cqe;
u64 abort;
u64 abort_abs_diff_ns;
- u64 resync_cqe;
- u64 resync_event;
- u64 ooo_cqe_drop;
+ u64 late_cqe;
};
struct mlx5e_rep_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 31708d5aa608..318083690fcd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
{
struct mlx5e_priv *out_priv, *route_priv;
struct mlx5_core_dev *route_mdev;
- struct mlx5_devcom *devcom;
+ struct mlx5_devcom_comp_dev *pos;
struct mlx5_eswitch *esw;
u16 vhca_id;
int err;
- int i;
out_priv = netdev_priv(out_dev);
esw = out_priv->mdev->priv.eswitch;
@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
return err;
rcu_read_lock();
- devcom = out_priv->mdev->priv.devcom;
err = -ENODEV;
- mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
- esw, i) {
+ mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
if (!err)
break;
@@ -2038,15 +2035,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
if (mlx5e_is_eswitch_flow(flow)) {
- struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
+ struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
- if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+ if (!mlx5_devcom_for_each_peer_begin(devcom)) {
mlx5e_tc_del_fdb_flow(priv, flow);
return;
}
mlx5e_tc_del_fdb_peers_flow(flow);
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(devcom);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
mlx5e_tc_del_nic_flow(priv, flow);
@@ -2600,29 +2597,29 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match_level = outer_match_level;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_META) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_CVLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_TCP) |
- BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_CT) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
- BIT(FLOW_DISSECTOR_KEY_ICMP) |
- BIT(FLOW_DISSECTOR_KEY_MPLS))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
- netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+ netdev_dbg(priv->netdev, "Unsupported key used: 0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -4223,8 +4220,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
- bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
- MLX5_DEVCOM_ESW_OFFLOADS);
+ bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
if (!esw_paired)
return false;
@@ -4491,14 +4487,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
- struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+ struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *in_rep = rpriv->rep;
struct mlx5_core_dev *in_mdev = priv->mdev;
struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
int err;
- int i;
flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
in_mdev);
@@ -4510,27 +4505,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return 0;
}
- if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+ if (!mlx5_devcom_for_each_peer_begin(devcom)) {
err = -ENODEV;
goto clean_flow;
}
- mlx5_devcom_for_each_peer_entry(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
- peer_esw, i) {
+ mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
if (err)
goto peer_clean;
}
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(devcom);
*__flow = flow;
return 0;
peer_clean:
mlx5e_tc_del_fdb_peers_flow(flow);
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(devcom);
clean_flow:
mlx5e_tc_del_fdb_flow(priv, flow);
return err;
@@ -4633,6 +4626,46 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
+/* As IPsec and TC order is not aligned between software and hardware-offload,
+ * either IPsec offload or TC offload, not both, is allowed for a specific interface.
+ */
+static bool is_tc_ipsec_order_check_needed(struct net_device *filter, struct mlx5e_priv *priv)
+{
+ if (!IS_ENABLED(CONFIG_MLX5_EN_IPSEC))
+ return false;
+
+ if (filter != priv->netdev)
+ return false;
+
+ if (mlx5e_eswitch_vf_rep(priv->netdev))
+ return false;
+
+ return true;
+}
+
+static int mlx5e_tc_block_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!is_tc_ipsec_order_check_needed(filter, priv))
+ return 0;
+
+ if (mdev->num_block_tc)
+ return -EBUSY;
+
+ mdev->num_block_ipsec++;
+
+ return 0;
+}
+
+static void mlx5e_tc_unblock_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
+{
+ if (!is_tc_ipsec_order_check_needed(filter, priv))
+ return;
+
+ priv->mdev->num_block_ipsec--;
+}
+
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
@@ -4645,6 +4678,10 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
if (!mlx5_esw_hold(priv->mdev))
return -EBUSY;
+ err = mlx5e_tc_block_ipsec_offload(dev, priv);
+ if (err)
+ goto esw_release;
+
mlx5_esw_get(priv->mdev);
rcu_read_lock();
@@ -4690,7 +4727,9 @@ rcu_unlock:
err_free:
mlx5e_flow_put(priv, flow);
out:
+ mlx5e_tc_unblock_ipsec_offload(dev, priv);
mlx5_esw_put(priv->mdev);
+esw_release:
mlx5_esw_release(priv->mdev);
return err;
}
@@ -4731,6 +4770,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
trace_mlx5e_delete_flower(f);
mlx5e_flow_put(priv, flow);
+ mlx5e_tc_unblock_ipsec_offload(dev, priv);
mlx5_esw_put(priv->mdev);
return 0;
@@ -4748,7 +4788,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
- struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
@@ -4784,7 +4824,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
/* Under multipath it's possible for one rule to be currently
* un-offloaded while the other rule is offloaded.
*/
- if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
goto out;
if (flow_flag_test(flow, DUP)) {
@@ -4815,7 +4855,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
}
no_peer_counter:
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (esw)
+ mlx5_devcom_for_each_peer_end(esw->devcom);
out:
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
@@ -5220,11 +5261,12 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
+ struct netdev_phys_item_id ppid;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- u64 mapping_id;
+ u64 mapping_id, key;
int err = 0;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
@@ -5278,7 +5320,11 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_action_counter;
}
- mlx5_esw_offloads_devcom_init(esw);
+ err = dev_get_port_parent_id(priv->netdev, &ppid, false);
+ if (!err) {
+ memcpy(&key, &ppid.id, sizeof(key));
+ mlx5_esw_offloads_devcom_init(esw, key);
+ }
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index c7eb6b238c2b..d41435c22ce5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -372,7 +372,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
const struct mlx5e_tx_attr *attr,
const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
- bool xmit_more)
+ struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
bool send_doorbell;
@@ -394,11 +394,16 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_check_stop(sq);
- if (unlikely(sq->ptpsq)) {
+ if (unlikely(sq->ptpsq &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+
mlx5e_skb_cb_hwtstamp_init(skb);
- mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
+ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
+ metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
- !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
+ mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
netif_tx_stop_queue(sq->txq);
sq->stats->stopped++;
}
@@ -483,13 +488,16 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
+ be32_to_cpu(eseg->flow_table_metadata));
mlx5e_tx_flush(sq);
}
@@ -645,9 +653,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
- if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
- ptpsq->ts_cqe_ctr_mask);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata =
+ cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
}
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
@@ -766,7 +774,7 @@ void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
{
if (netif_tx_queue_stopped(sq->txq) &&
mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
- mlx5e_ptpsq_fifo_has_room(sq) &&
+ !mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
sq->stats->wake++;
@@ -1031,7 +1039,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
return;
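
The en_tx.c hunks replace the PTP SKB FIFO with a metadata freelist: on transmit of a hardware-timestamped packet the driver pops a free metadata index and carries it in the WQE's flow_table_metadata, pushes it back on a TX error, and stops the queue once the freelist runs dry. A stand-alone sketch of such a fixed-size index freelist (sizes and helper names are illustrative stand-ins, not the driver's; its real helpers are added elsewhere in this series):

#include <stdio.h>
#include <stdint.h>

#define FIFO_SIZE 8				/* must be a power of two */

struct metadata_fifo {
	uint8_t data[FIFO_SIZE];
	unsigned int pc;			/* producer counter */
	unsigned int cc;			/* consumer counter */
};

static void fifo_push(struct metadata_fifo *f, uint8_t idx)
{
	f->data[f->pc++ & (FIFO_SIZE - 1)] = idx;
}

static uint8_t fifo_pop(struct metadata_fifo *f)
{
	return f->data[f->cc++ & (FIFO_SIZE - 1)];
}

static int fifo_empty(const struct metadata_fifo *f)
{
	return f->pc == f->cc;
}

int main(void)
{
	struct metadata_fifo fl = { 0 };
	uint8_t i, id;

	for (i = 0; i < FIFO_SIZE; i++)		/* seed with all metadata indices */
		fifo_push(&fl, i);

	id = fifo_pop(&fl);			/* index travels in the WQE metadata */
	printf("popped %u, empty=%d\n", id, fifo_empty(&fl));

	fifo_push(&fl, id);			/* returned on completion or TX error */
	printf("after push, empty=%d\n", fifo_empty(&fl));
	return 0;
}
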
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 3db4866d7880..ea0405e0a43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -47,7 +47,7 @@ enum {
static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
struct mlx5_eq_table {
- struct list_head comp_eqs_list;
+ struct xarray comp_eqs;
struct mlx5_eq_async pages_eq;
struct mlx5_eq_async cmd_eq;
struct mlx5_eq_async async_eq;
@@ -58,11 +58,14 @@ struct mlx5_eq_table {
struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */
- int num_comp_eqs;
+ struct mutex comp_lock; /* sync comp eqs creations */
+ int curr_comp_eqs;
+ int max_comp_eqs;
struct mlx5_irq_table *irq_table;
- struct mlx5_irq **comp_irqs;
+ struct xarray comp_irqs;
struct mlx5_irq *ctrl_irq;
struct cpu_rmap *rmap;
+ struct cpumask used_cpus;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -452,13 +455,22 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
eq_table->irq_table = mlx5_irq_table_get(dev);
+ cpumask_clear(&eq_table->used_cpus);
+ xa_init(&eq_table->comp_eqs);
+ xa_init(&eq_table->comp_irqs);
+ mutex_init(&eq_table->comp_lock);
+ eq_table->curr_comp_eqs = 0;
return 0;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+
mlx5_eq_debugfs_cleanup(dev);
- kvfree(dev->priv.eq_table);
+ xa_destroy(&table->comp_irqs);
+ xa_destroy(&table->comp_eqs);
+ kvfree(table);
}
/* Async EQs */
@@ -803,88 +815,112 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
-static void comp_irqs_release_pci(struct mlx5_core_dev *dev)
+static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
+
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
- mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_release_vector(irq);
}
-static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
+static int mlx5_cpumask_default_spread(int numa_node, int index)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
const struct cpumask *prev = cpu_none_mask;
const struct cpumask *mask;
- int ncomp_eqs;
- u16 *cpus;
- int ret;
+ int found_cpu = 0;
+ int i = 0;
int cpu;
- int i;
-
- ncomp_eqs = table->num_comp_eqs;
- cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
- if (!cpus)
- return -ENOMEM;
- i = 0;
rcu_read_lock();
- for_each_numa_hop_mask(mask, dev->priv.numa_node) {
+ for_each_numa_hop_mask(mask, numa_node) {
for_each_cpu_andnot(cpu, mask, prev) {
- cpus[i] = cpu;
- if (++i == ncomp_eqs)
+ if (i++ == index) {
+ found_cpu = cpu;
goto spread_done;
+ }
}
prev = mask;
}
+
spread_done:
rcu_read_unlock();
- ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs, &table->rmap);
- kfree(cpus);
- return ret;
+ return found_cpu;
}
-static void comp_irqs_release_sf(struct mlx5_core_dev *dev)
+static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
-
- mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
+#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->priv.eq_table->rmap;
+#endif
+ return dev->priv.eq_table->rmap;
+#else
+ return NULL;
+#endif
}
-static int comp_irqs_request_sf(struct mlx5_core_dev *dev)
+static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs = table->num_comp_eqs;
+ struct cpu_rmap *rmap;
+ struct mlx5_irq *irq;
+ int cpu;
+
+ rmap = mlx5_eq_table_get_pci_rmap(dev);
+ cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+ irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
- return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
-static void comp_irqs_release(struct mlx5_core_dev *dev)
+static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_irq *irq;
- mlx5_core_is_sf(dev) ? comp_irqs_release_sf(dev) :
- comp_irqs_release_pci(dev);
+ irq = xa_load(&table->comp_irqs, vecidx);
+ if (!irq)
+ return;
- kfree(table->comp_irqs);
+ xa_erase(&table->comp_irqs, vecidx);
+ mlx5_irq_affinity_irq_release(dev, irq);
}
-static int comp_irqs_request(struct mlx5_core_dev *dev)
+static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs;
- int ret;
+ struct mlx5_irq *irq;
- ncomp_eqs = table->num_comp_eqs;
- table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
- if (!table->comp_irqs)
- return -ENOMEM;
+ irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
+ if (IS_ERR(irq)) {
+ /* In case the SF irq pool does not exist, fall back to the PF irqs */
+ if (PTR_ERR(irq) == -ENOENT)
+ return comp_irq_request_pci(dev, vecidx);
+
+ return PTR_ERR(irq);
+ }
+
+ return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
+}
- ret = mlx5_core_is_sf(dev) ? comp_irqs_request_sf(dev) :
- comp_irqs_request_pci(dev);
- if (ret < 0)
- kfree(table->comp_irqs);
+static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
+{
+ mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
+ comp_irq_release_pci(dev, vecidx);
+}
- return ret;
+static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
+{
+ return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
+ comp_irq_request_pci(dev, vecidx);
}
#ifdef CONFIG_RFS_ACCEL
@@ -901,7 +937,7 @@ static int alloc_rmap(struct mlx5_core_dev *mdev)
if (mlx5_core_is_sf(mdev))
return 0;
- eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+ eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
if (!eq_table->rmap)
return -ENOMEM;
return 0;
@@ -921,22 +957,19 @@ static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif
-static void destroy_comp_eqs(struct mlx5_core_dev *dev)
+static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_eq_comp *eq, *n;
-
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
- list_del(&eq->list);
- mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
- if (destroy_unmap_eq(dev, &eq->core))
- mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
- eq->core.eqn);
- tasklet_disable(&eq->tasklet_ctx.task);
- kfree(eq);
- }
- comp_irqs_release(dev);
- free_rmap(dev);
+
+ xa_erase(&table->comp_eqs, vecidx);
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
+ if (destroy_unmap_eq(dev, &eq->core))
+ mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
+ eq->core.eqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
+ kfree(eq);
+ comp_irq_release(dev, vecidx);
+ table->curr_comp_eqs--;
}
static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
@@ -954,129 +987,149 @@ static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
return MLX5_COMP_EQ_SIZE;
}
-static int create_comp_eqs(struct mlx5_core_dev *dev)
+/* Must be called with EQ table comp_lock held */
+static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_param param = {};
struct mlx5_eq_comp *eq;
- int ncomp_eqs;
+ struct mlx5_irq *irq;
int nent;
int err;
- int i;
- err = alloc_rmap(dev);
+ lockdep_assert_held(&table->comp_lock);
+ if (table->curr_comp_eqs == table->max_comp_eqs) {
+ mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
+ table->max_comp_eqs);
+ return -ENOMEM;
+ }
+
+ err = comp_irq_request(dev, vecidx);
if (err)
return err;
- ncomp_eqs = comp_irqs_request(dev);
- if (ncomp_eqs < 0) {
- err = ncomp_eqs;
- goto err_irqs_req;
- }
-
- INIT_LIST_HEAD(&table->comp_eqs_list);
nent = comp_eq_depth_devlink_param_get(dev);
- for (i = 0; i < ncomp_eqs; i++) {
- struct mlx5_eq_param param = {};
+ eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
+ if (!eq) {
+ err = -ENOMEM;
+ goto clean_irq;
+ }
- eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
- if (!eq) {
- err = -ENOMEM;
- goto clean;
- }
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
- INIT_LIST_HEAD(&eq->tasklet_ctx.list);
- INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
- spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
-
- eq->irq_nb.notifier_call = mlx5_eq_comp_int;
- param = (struct mlx5_eq_param) {
- .irq = table->comp_irqs[i],
- .nent = nent,
- };
-
- err = create_map_eq(dev, &eq->core, &param);
- if (err)
- goto clean_eq;
- err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
- if (err) {
- destroy_unmap_eq(dev, &eq->core);
- goto clean_eq;
- }
+ irq = xa_load(&table->comp_irqs, vecidx);
+ eq->irq_nb.notifier_call = mlx5_eq_comp_int;
+ param = (struct mlx5_eq_param) {
+ .irq = irq,
+ .nent = nent,
+ };
- mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
- /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
- list_add_tail(&eq->list, &table->comp_eqs_list);
+ err = create_map_eq(dev, &eq->core, &param);
+ if (err)
+ goto clean_eq;
+ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+ if (err) {
+ destroy_unmap_eq(dev, &eq->core);
+ goto clean_eq;
}
- table->num_comp_eqs = ncomp_eqs;
- return 0;
+ mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
+ err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
+ if (err)
+ goto disable_eq;
+
+ table->curr_comp_eqs++;
+ return eq->core.eqn;
+disable_eq:
+ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
clean_eq:
kfree(eq);
-clean:
- destroy_comp_eqs(dev);
-err_irqs_req:
- free_rmap(dev);
+clean_irq:
+ comp_irq_release(dev, vecidx);
return err;
}
-static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn)
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
- int err = -ENOENT;
- int i = 0;
+ int ret = 0;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
- if (i++ == vector) {
- if (irqn)
- *irqn = eq->core.irqn;
- if (eqn)
- *eqn = eq->core.eqn;
- err = 0;
- break;
- }
+ mutex_lock(&table->comp_lock);
+ eq = xa_load(&table->comp_eqs, vecidx);
+ if (eq) {
+ *eqn = eq->core.eqn;
+ goto out;
}
- return err;
-}
+ ret = create_comp_eq(dev, vecidx);
+ if (ret < 0) {
+ mutex_unlock(&table->comp_lock);
+ return ret;
+ }
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
-{
- return vector2eqnirqn(dev, vector, eqn, NULL);
+ *eqn = ret;
+out:
+ mutex_unlock(&table->comp_lock);
+ return 0;
}
-EXPORT_SYMBOL(mlx5_vector2eqn);
+EXPORT_SYMBOL(mlx5_comp_eqn_get);
-int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
+int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
- return vector2eqnirqn(dev, vector, NULL, irqn);
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+ int eqn;
+ int err;
+
+ /* Allocate the EQ if not allocated yet */
+ err = mlx5_comp_eqn_get(dev, vector, &eqn);
+ if (err)
+ return err;
+
+ eq = xa_load(&table->comp_eqs, vector);
+ *irqn = eq->core.irqn;
+ return 0;
}
-unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
{
- return dev->priv.eq_table->num_comp_eqs;
+ return dev->priv.eq_table->max_comp_eqs;
}
-EXPORT_SYMBOL(mlx5_comp_vectors_count);
+EXPORT_SYMBOL(mlx5_comp_vectors_max);
-struct cpumask *
+static struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
- int i = 0;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
- if (i++ == vector)
- return mlx5_irq_get_affinity_mask(eq->core.irq);
- }
+ eq = xa_load(&table->comp_eqs, vector);
+ if (eq)
+ return mlx5_irq_get_affinity_mask(eq->core.irq);
- WARN_ON_ONCE(1);
return NULL;
}
-EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
+{
+ struct cpumask *mask;
+ int cpu;
+
+ mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
+ if (mask)
+ cpu = cpumask_first(mask);
+ else
+ cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+
+ return cpu;
+}
+EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
@@ -1089,11 +1142,11 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
+ unsigned long index;
- list_for_each_entry(eq, &table->comp_eqs_list, list) {
+ xa_for_each(&table->comp_eqs, index, eq)
if (eq->core.eqn == eqn)
return eq;
- }
return ERR_PTR(-ENOENT);
}
@@ -1101,11 +1154,7 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = dev->priv.eq_table;
-
- mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
mlx5_irq_table_free_irqs(dev);
- mutex_unlock(&table->lock);
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -1148,22 +1197,22 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
- eq_table->num_comp_eqs = get_num_eqs(dev);
+ eq_table->max_comp_eqs = get_num_eqs(dev);
err = create_async_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create async EQs\n");
goto err_async_eqs;
}
- err = create_comp_eqs(dev);
+ err = alloc_rmap(dev);
if (err) {
- mlx5_core_err(dev, "Failed to create completion EQs\n");
- goto err_comp_eqs;
+ mlx5_core_err(dev, "Failed to allocate rmap\n");
+ goto err_rmap;
}
return 0;
-err_comp_eqs:
+err_rmap:
destroy_async_eqs(dev);
err_async_eqs:
return err;
@@ -1171,7 +1220,14 @@ err_async_eqs:
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
- destroy_comp_eqs(dev);
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+ unsigned long index;
+
+ xa_for_each(&table->comp_eqs, index, eq)
+ destroy_comp_eq(dev, eq, index);
+
+ free_rmap(dev);
destroy_async_eqs(dev);
}
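
With the eq.c changes above, completion EQs are no longer all created up front; they live in an xarray and are created on first request for a given vector. The sketch below shows how a consumer of the new API might look, using only the signatures visible in the hunks above (mlx5_comp_vectors_max, mlx5_comp_eqn_get, mlx5_comp_irqn_get, mlx5_comp_vector_get_cpu); the surrounding function is hypothetical and error handling is simplified.

/* Hypothetical consumer: ask for the EQN of a vector; the EQ and its IRQ
 * are allocated lazily on first use. */
static int example_open_cq_on_vector(struct mlx5_core_dev *dev, u16 vecidx)
{
	unsigned int irqn;
	int eqn, cpu, err;

	if (vecidx >= mlx5_comp_vectors_max(dev))
		return -EINVAL;

	err = mlx5_comp_eqn_get(dev, vecidx, &eqn);	/* creates the comp EQ if needed */
	if (err)
		return err;

	err = mlx5_comp_irqn_get(dev, vecidx, &irqn);	/* reuses the EQ created above */
	if (err)
		return err;

	cpu = mlx5_comp_vector_get_cpu(dev, vecidx);	/* CPU where completions land */

	/* ... create the CQ on @eqn, use @irqn / @cpu for affinity hints ... */
	return 0;
}
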
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index f4fe1daa4afd..e36294b7ade2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+ struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
- int i;
- if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV);
- mlx5_devcom_for_each_peer_entry(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
- tmp, i) {
+ mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
peer_esw = tmp;
break;
}
}
+
if (!peer_esw) {
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- return ERR_PTR(-ENODEV);
+ handle = ERR_PTR(-ENODEV);
+ goto out;
}
handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
bridge, peer_esw);
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
+out:
+ mlx5_devcom_for_each_peer_end(devcom);
return handle;
}
@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
mlx5_fc_id(counter), bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
- esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
- vport_num, err);
+ esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
+ vport_num, err, peer);
goto err_ingress_flow_create;
}
entry->ingress_handle = handle;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index 2455f8b93c1e..7a01714b3780 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
- struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+ struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
- int i;
- if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV);
- mlx5_devcom_for_each_peer_entry(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
- tmp, i) {
+ mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
peer_esw = tmp;
break;
}
}
+
if (!peer_esw) {
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- return ERR_PTR(-ENODEV);
+ handle = ERR_PTR(-ENODEV);
+ goto out;
}
handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+out:
+ mlx5_devcom_for_each_peer_end(devcom);
return handle;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index fdf2be548e85..d8e739cbcbce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -16,39 +16,28 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
- return vport_num == MLX5_VPORT_UPLINK ||
- (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
+ return (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
mlx5_eswitch_is_vf_vport(esw, vport_num) ||
mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
}
-static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
+ u16 vport_num,
+ struct devlink_port *dl_port)
{
struct mlx5_core_dev *dev = esw->dev;
- struct devlink_port_attrs attrs = {};
struct netdev_phys_item_id ppid = {};
- struct devlink_port *dl_port;
u32 controller_num = 0;
bool external;
u16 pfnum;
- dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
- if (!dl_port)
- return NULL;
-
mlx5_esw_get_port_parent_id(dev, &ppid);
pfnum = mlx5_get_dev_index(dev);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
- if (vport_num == MLX5_VPORT_UPLINK) {
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pfnum;
- memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
- attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_set(dl_port, &attrs);
- } else if (vport_num == MLX5_VPORT_PF) {
+ if (vport_num == MLX5_VPORT_PF) {
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
@@ -63,91 +52,83 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
vport_num - 1, false);
}
- return dl_port;
}
-static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
+int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- kfree(dl_port);
-}
-
-static const struct devlink_port_ops mlx5_esw_dl_port_ops = {
- .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
- .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
- .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
- .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
- .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
- .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
-};
-
-int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct devlink_port *dl_port;
- unsigned int dl_port_index;
- struct mlx5_vport *vport;
- struct devlink *devlink;
- int err;
+ struct mlx5_devlink_port *dl_port;
+ u16 vport_num = vport->vport;
if (!mlx5_esw_devlink_port_supported(esw, vport_num))
return 0;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
- dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
+ dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
if (!dl_port)
return -ENOMEM;
- devlink = priv_to_devlink(dev);
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
- &mlx5_esw_dl_port_ops);
- if (err)
- goto reg_err;
-
- err = devl_rate_leaf_create(dl_port, vport, NULL);
- if (err)
- goto rate_err;
+ mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num,
+ &dl_port->dl_port);
vport->dl_port = dl_port;
+ mlx5_devlink_port_init(dl_port, vport);
return 0;
-
-rate_err:
- devl_port_unregister(dl_port);
-reg_err:
- mlx5_esw_dl_port_free(dl_port);
- return err;
}
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
-
- if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+ if (!vport->dl_port)
return;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return;
+ kfree(vport->dl_port);
+ vport->dl_port = NULL;
+}
- if (vport->dl_port->devlink_rate) {
- mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devl_rate_leaf_destroy(vport->dl_port);
- }
+static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
+ .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
+ .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
+ .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
+ .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
+ .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+#ifdef CONFIG_XFRM_OFFLOAD
+ .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
+ .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+ .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+ .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
+#endif /* CONFIG_XFRM_OFFLOAD */
+};
- devl_port_unregister(vport->dl_port);
- mlx5_esw_dl_port_free(vport->dl_port);
- vport->dl_port = NULL;
+static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
+ struct devlink_port *dl_port,
+ u32 controller, u32 sfnum)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct netdev_phys_item_id ppid = {};
+ u16 pfnum;
+
+ pfnum = mlx5_get_dev_index(dev);
+ mlx5_esw_get_port_parent_id(dev, &ppid);
+ memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
}
-struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_devlink_port *dl_port,
+ u32 controller, u32 sfnum)
{
- struct mlx5_vport *vport;
+ mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum);
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
+ vport->dl_port = dl_port;
+ mlx5_devlink_port_init(dl_port, vport);
+ return 0;
+}
+
+void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ vport->dl_port = NULL;
}
static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
@@ -164,58 +145,62 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
#endif
};
-int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 controller, u32 sfnum)
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = esw->dev;
- struct netdev_phys_item_id ppid = {};
+ const struct devlink_port_ops *ops;
+ struct mlx5_devlink_port *dl_port;
+ u16 vport_num = vport->vport;
unsigned int dl_port_index;
- struct mlx5_vport *vport;
struct devlink *devlink;
- u16 pfnum;
int err;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ dl_port = vport->dl_port;
+ if (!dl_port)
+ return 0;
+
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ ops = &mlx5_esw_dl_sf_port_ops;
+ else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num))
+ ops = &mlx5_esw_pf_vf_dl_port_ops;
+ else
+ ops = NULL;
- pfnum = mlx5_get_dev_index(dev);
- mlx5_esw_get_port_parent_id(dev, &ppid);
- memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- dl_port->attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
- &mlx5_esw_dl_sf_port_ops);
+ err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops);
if (err)
return err;
- err = devl_rate_leaf_create(dl_port, vport, NULL);
+ err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL);
if (err)
goto rate_err;
- vport->dl_port = dl_port;
return 0;
rate_err:
- devl_port_unregister(dl_port);
+ devl_port_unregister(&dl_port->dl_port);
return err;
}
-void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
+ struct mlx5_devlink_port *dl_port;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
+ if (!vport->dl_port)
return;
+ dl_port = vport->dl_port;
- if (vport->dl_port->devlink_rate) {
- mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devl_rate_leaf_destroy(vport->dl_port);
- }
+ mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
+ devl_rate_leaf_destroy(&dl_port->dl_port);
- devl_port_unregister(vport->dl_port);
- vport->dl_port = NULL;
+ devl_port_unregister(&dl_port->dl_port);
+}
+
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port;
}
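
The devlink_port.c refactor above splits port handling into an init/cleanup step (allocate the mlx5_devlink_port and derive its attributes) and a separate register/unregister step against devlink. A hypothetical caller, using only the functions introduced in the hunks above, might sequence them as follows; the wrapper itself is illustrative, not code from this series.

static int example_bring_up_pf_vf_port(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int err;

	/* Prepare the devlink port and its attributes, but do not expose it yet. */
	err = mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
	if (err)
		return err;

	/* Register with devlink; on failure undo only the init step. */
	err = mlx5_esw_offloads_devlink_port_register(esw, vport);
	if (err)
		mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
	return err;
}
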
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
new file mode 100644
index 000000000000..da10e04777cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev)) {
+ *result = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ *result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+enum esw_vport_ipsec_offload {
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
+};
+
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ bool ipsec_enabled;
+ int err;
+
+ /* Querying IPsec caps only makes sense when generic ipsec_offload
+ * HCA cap is enabled
+ */
+ err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled);
+ if (err)
+ return err;
+
+ if (!ipsec_enabled) {
+ vport->info.ipsec_crypto_enabled = false;
+ vport->info.ipsec_packet_enabled = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ vport->info.ipsec_crypto_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+ vport->info.ipsec_packet_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
+ ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int err;
+
+ if (vport->vport == MLX5_VPORT_PF)
+ return -EOPNOTSUPP;
+
+ if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
+ err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+
+ if (enable) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ } else {
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
+ if (err)
+ return err;
+
+ /* The generic ipsec_offload cap can be disabled only if both
+ * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+ */
+ if (!vport->info.ipsec_crypto_enabled &&
+ !vport->info.ipsec_packet_enabled) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+ }
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ vport->info.ipsec_crypto_enabled = enable;
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ vport->info.ipsec_packet_enabled = enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL);
+ if (ret)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek))
+ ret = -EOPNOTSUPP;
+free:
+ kvfree(query_cap);
+ return ret;
+}
+
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
+{
+ /* Old firmware doesn't support ipsec_offload capability for VFs. This
+ * can be detected by checking reformat_add_esp_trasport capability -
+ * when this cap isn't supported it means firmware cannot be trusted
+ * about what it reports for ipsec_offload cap.
+ */
+ return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ err = esw_ipsec_offload_supported(dev, vport_num);
+ if (err)
+ return err;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp))
+ goto free;
+
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ ret = esw_ipsec_offload_supported(dev, vport_num);
+ if (ret)
+ return ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+ if (ret)
+ goto out;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ kvfree(query_cap);
+ return ret;
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
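
The new esw/ipsec.c above exposes per-VF IPsec capability toggles: the generic ipsec_offload HCA cap is raised before the per-type cap on enable, and dropped only once neither crypto nor packet offload remains enabled on disable. A minimal sketch of how a caller (for example, a devlink port-function handler elsewhere in the driver) might combine the two exported helpers; the wrapper name is hypothetical and only the two helpers below come from this file.

static int example_set_vf_ipsec_crypto(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport, bool enable)
{
	int err;

	/* Rejects old firmware and devices without the required DEK/SWP caps. */
	err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport->vport);
	if (err)
		return err;

	/* Toggles the generic and crypto-specific HCA caps in the right order. */
	return mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
}
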
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
new file mode 100644
index 000000000000..095f31f380fa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en_accel/ipsec.h"
+#include "esw/ipsec_fs.h"
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+#include "en/tc_priv.h"
+#endif
+
+enum {
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
+ MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
+ MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+};
+
+enum {
+ MLX5_ESW_IPSEC_TX_POL_FT_LEVEL,
+ MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL,
+ MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
+};
+
+static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status_drop.rule);
+ mlx5_destroy_flow_group(rx->status_drop.group);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+}
+
+static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+}
+
+static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status_drop.group = g;
+ rx->status_drop.rule = rule;
+ rx->status_drop_cnt = flow_counter;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ mlx5_destroy_flow_group(g);
+err_out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to add ipsec rx status pass rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status.rule = rule;
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ esw_ipsec_rx_status_pass_destroy(ipsec, rx);
+ esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ int err;
+
+ err = esw_ipsec_rx_status_drop_create(ipsec, rx);
+ if (err)
+ return err;
+
+ err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
+ if (err)
+ goto err_pass_create;
+
+ return 0;
+
+err_pass_create:
+ esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ attr->prio = FDB_CRYPTO_INGRESS;
+ attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL;
+ attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL;
+ attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
+}
+
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+
+ return 0;
+}
+
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_modify_hdr *modify_hdr;
+ u32 mapped_id;
+ int err;
+
+ err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ xa_mk_value(sa_entry->ipsec_obj_id),
+ XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
+ if (err)
+ return err;
+
+ /* reuse tunnel bits for ipsec,
+ * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id.
+ */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS);
+ MLX5_SET(set_action_in, action, length,
+ ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS);
+ MLX5_SET(set_action_in, action, data, mapped_id);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_header_alloc;
+ }
+
+ sa_entry->rx_mapped_id = mapped_id;
+ flow_act->modify_hdr = modify_hdr;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return 0;
+
+err_header_alloc:
+ xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+
+ if (sa_entry->rx_mapped_id)
+ xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ sa_entry->rx_mapped_id);
+}
+
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ void *val;
+
+ val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ if (!val)
+ return -ENOENT;
+
+ *ipsec_obj_id = xa_to_value(val);
+
+ return 0;
+}
+
+void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx_create_attr *attr)
+{
+ attr->prio = FDB_CRYPTO_EGRESS;
+ attr->pol_level = MLX5_ESW_IPSEC_TX_POL_FT_LEVEL;
+ attr->sa_level = MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL;
+ attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
+}
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
+ int err;
+
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ if (esw_attr->out_count - esw_attr->split_count > 1)
+ return 0;
+
+ err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr,
+ esw_attr->out_count - 1);
+
+ return err;
+}
+#endif
+
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
+{
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5e_rep_priv *rpriv;
+ struct rhashtable_iter iter;
+ struct mlx5e_tc_flow *flow;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&esw->offloads.vport_reps, i, rep) {
+ rpriv = rep->rep_data[REP_ETH].priv;
+ if (!rpriv || !rpriv->netdev)
+ continue;
+
+ rhashtable_walk_enter(&rpriv->tc_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while ((flow = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(flow))
+ continue;
+
+ err = mlx5_esw_ipsec_modify_flow_dests(esw, flow);
+ if (err)
+ mlx5_core_warn_once(mdev,
+ "Failed to modify flow dests for IPsec");
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ }
+#endif
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
new file mode 100644
index 000000000000..0c90f7a8b0d3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_ESW_IPSEC_FS_H__
+#define __MLX5_ESW_IPSEC_FS_H__
+
+struct mlx5e_ipsec;
+struct mlx5e_ipsec_sa_entry;
+
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx);
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest);
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr);
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest);
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act);
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id);
+void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx_create_attr *attr);
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+#else
+static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx) {}
+
+static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr) {}
+
+static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest)
+{
+ return -EINVAL;
+}
+
+static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {}
+
+static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx_create_attr *attr) {}
+
+static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 7c79476cc5f9..1887a24ee414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -740,7 +740,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name,
u64 *rate, struct netlink_ext_ack *extack)
{
- u32 link_speed_max, reminder;
+ u32 link_speed_max, remainder;
u64 value;
int err;
@@ -750,8 +750,8 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return err;
}
- value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &reminder);
- if (reminder) {
+ value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
+ if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
name, *rate);
NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps");
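
The qos.c hunk above converts a devlink rate given in bytes/sec into Mbps and rejects values that are not an exact multiple of 1 Mbps. A standalone sketch of the same check is below; the unit constant is an assumption for illustration (1 Mbps corresponds to 125000 bytes/sec), not a claim about MLX5_LINKSPEED_UNIT.

#include <stdint.h>
#include <stdio.h>

#define LINKSPEED_UNIT 125000ULL	/* bytes/sec in 1 Mbps (assumed value) */

/* Return the rate in Mbps, or 0 if it is not a whole multiple of 1 Mbps. */
static uint64_t rate_bytes_to_mbps(uint64_t rate_bytes_per_sec)
{
	if (rate_bytes_per_sec % LINKSPEED_UNIT)
		return 0;
	return rate_bytes_per_sec / LINKSPEED_UNIT;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)rate_bytes_to_mbps(1250000)); /* 10 Mbps */
	printf("%llu\n", (unsigned long long)rate_bytes_to_mbps(1000));    /* 0: rejected */
	return 0;
}
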
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 243c455f1029..6cd7d6497e10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -48,6 +48,7 @@
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"
+#include "en_accel/ipsec.h"
enum {
MLX5_ACTION_NONE = 0,
@@ -77,18 +78,31 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
return 0;
}
-struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
+static struct mlx5_eswitch *__mlx5_devlink_eswitch_get(struct devlink *devlink, bool check)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- err = mlx5_eswitch_check(dev);
- if (err)
- return ERR_PTR(err);
+ if (check) {
+ err = mlx5_eswitch_check(dev);
+ if (err)
+ return ERR_PTR(err);
+ }
return dev->priv.eswitch;
}
+struct mlx5_eswitch *__must_check
+mlx5_devlink_eswitch_get(struct devlink *devlink)
+{
+ return __mlx5_devlink_eswitch_get(devlink, true);
+}
+
+struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink)
+{
+ return __mlx5_devlink_eswitch_get(devlink, false);
+}
+
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -818,6 +832,8 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
+
+ err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
out_free:
kfree(query_ctx);
return err;
@@ -882,16 +898,12 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
esw_vport_cleanup_acl(esw, vport);
}
-int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
enum mlx5_eswitch_vport_event enabled_events)
{
- struct mlx5_vport *vport;
+ u16 vport_num = vport->vport;
int ret;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
@@ -904,6 +916,9 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count++;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
* in smartNIC as it's a vport group manager.
@@ -912,7 +927,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
if (ret)
@@ -939,15 +954,12 @@ err_vhca_mapping:
return ret;
}
-void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return;
+ u16 vport_num = vport->vport;
mutex_lock(&esw->state_lock);
+
if (!vport->enabled)
goto done;
@@ -957,12 +969,16 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
/* Disable events from this vport */
if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
- arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+ arm_vport_context_events_cmd(esw->dev, vport_num, 0);
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count--;
+
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -1068,31 +1084,104 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
}
}
-/* Public E-Switch API */
-int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
- enum mlx5_eswitch_vport_event enabled_events)
+static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ enum mlx5_eswitch_vport_event enabled_events)
{
int err;
- err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
+ err = mlx5_esw_vport_enable(esw, vport, enabled_events);
if (err)
return err;
- err = esw_offloads_load_rep(esw, vport_num);
+ err = mlx5_esw_offloads_load_rep(esw, vport);
if (err)
goto err_rep;
return err;
err_rep:
- mlx5_esw_vport_disable(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport);
+ return err;
+}
+
+static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ mlx5_esw_offloads_unload_rep(esw, vport);
+ mlx5_esw_vport_disable(esw, vport);
+}
+
+static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
+ int err;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport);
+ if (err)
+ return err;
+
+ err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
+ if (err)
+ goto err_load;
+ return 0;
+
+err_load:
+ mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
+ return err;
+}
+
+static void mlx5_eswitch_unload_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+
+ mlx5_eswitch_unload_vport(esw, vport);
+ mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
+}
+
+int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events,
+ struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum)
+{
+ struct mlx5_vport *vport;
+ int err;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ err = mlx5_esw_offloads_init_sf_rep(esw, vport, dl_port, controller, sfnum);
+ if (err)
+ return err;
+
+ err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
+ if (err)
+ goto err_load;
+
+ return 0;
+
+err_load:
+ mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
return err;
}
-void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
- esw_offloads_unload_rep(esw, vport_num);
- mlx5_esw_vport_disable(esw, vport_num);
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+
+ mlx5_eswitch_unload_vport(esw, vport);
+ mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
}
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
@@ -1103,7 +1192,7 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
if (!vport->enabled)
continue;
- mlx5_eswitch_unload_vport(esw, vport->vport);
+ mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
}
}
@@ -1116,7 +1205,7 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
if (!vport->enabled)
continue;
- mlx5_eswitch_unload_vport(esw, vport->vport);
+ mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
}
}
@@ -1128,7 +1217,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
int err;
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
+ err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
if (err)
goto vf_err;
}
@@ -1148,7 +1237,7 @@ static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_v
int err;
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
- err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
+ err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
if (err)
goto vf_err;
}
@@ -1190,7 +1279,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
int ret;
/* Enable PF vport */
- ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
+ ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
if (ret)
return ret;
@@ -1201,7 +1290,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
- ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
+ ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
if (mlx5_core_ec_sriov_enabled(esw->dev)) {
@@ -1224,11 +1313,11 @@ vf_err:
mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
ec_vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
- mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
host_pf_disable_hca(esw->dev);
pf_hca_err:
- mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1242,11 +1331,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
if (mlx5_ecpf_vport_exists(esw->dev)) {
if (mlx5_core_ec_sriov_enabled(esw->dev))
mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
- mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
host_pf_disable_hca(esw->dev);
- mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
@@ -1919,6 +2008,12 @@ bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}
+bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num == MLX5_VPORT_PF ||
+ mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
@@ -2251,3 +2346,34 @@ struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);
+
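+/* Take an IPsec offload reference on the core device. Returns false if any
+ * VF vport already has IPsec offload enabled; paired with
+ * mlx5_eswitch_unblock_ipsec().
+ */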
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ return true;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->enabled_ipsec_vf_count) {
+ mutex_unlock(&esw->state_lock);
+ return false;
+ }
+
+ dev->num_ipsec_offloads++;
+ mutex_unlock(&esw->state_lock);
+ return true;
+}
+
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ /* Failure means no eswitch => core dev is not a PF */
+ return;
+
+ mutex_lock(&esw->state_lock);
+ dev->num_ipsec_offloads--;
+ mutex_unlock(&esw->state_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ae0dc8a3060d..37ab66e7b403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -163,6 +163,8 @@ struct mlx5_vport_info {
u8 trusted: 1;
u8 roce_enabled: 1;
u8 mig_enabled: 1;
+ u8 ipsec_crypto_enabled: 1;
+ u8 ipsec_packet_enabled: 1;
};
/* Vport context events */
@@ -172,6 +174,29 @@ enum mlx5_eswitch_vport_event {
MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
+struct mlx5_vport;
+
+struct mlx5_devlink_port {
+ struct devlink_port dl_port;
+ struct mlx5_vport *vport;
+};
+
+static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
+ struct mlx5_vport *vport)
+{
+ dl_port->vport = vport;
+}
+
+static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
+{
+ return container_of(dl_port, struct mlx5_devlink_port, dl_port);
+}
+
+static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
+{
+ return mlx5_devlink_port_get(dl_port)->vport;
+}
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -200,7 +225,7 @@ struct mlx5_vport {
bool enabled;
enum mlx5_eswitch_vport_event enabled_events;
int index;
- struct devlink_port *dl_port;
+ struct mlx5_devlink_port *dl_port;
};
struct mlx5_esw_indir_table;
@@ -254,6 +279,7 @@ struct mlx5_esw_offload {
struct mlx5_flow_group *vport_rx_group;
struct mlx5_flow_group *vport_rx_drop_group;
struct mlx5_flow_handle *vport_rx_drop_rule;
+ struct mlx5_flow_table *ft_ipsec_tx_pol;
struct xarray vport_reps;
struct list_head peer_flows[MLX5_MAX_PORTS];
struct mutex peer_mutex;
@@ -269,6 +295,7 @@ struct mlx5_esw_offload {
u8 inline_mode;
atomic64_t num_flows;
u64 num_block_encap;
+ u64 num_block_mode;
enum devlink_eswitch_encap_mode encap;
struct ida vport_metadata_ida;
unsigned int host_number; /* ECPF supports one external host */
@@ -354,6 +381,8 @@ struct mlx5_eswitch {
} params;
struct blocking_notifier_head n_head;
struct xarray paired;
+ struct mlx5_devcom_comp_dev *devcom;
+ u16 enabled_ipsec_vf_count;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -381,8 +410,9 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
+bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -531,6 +561,16 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+#endif /* CONFIG_XFRM_OFFLOAD */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -671,11 +711,16 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
-struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
+struct mlx5_eswitch *__must_check
+mlx5_devlink_eswitch_get(struct devlink *devlink);
+
+struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);
+
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
@@ -685,9 +730,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
-int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
enum mlx5_eswitch_vport_event enabled_events);
-void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
@@ -725,31 +770,40 @@ void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
u16 vport,
struct mlx5_flow_spec *spec);
-int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
-void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_devlink_port *dl_port,
+ u32 controller, u32 sfnum);
+void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
- enum mlx5_eswitch_vport_event enabled_events);
-void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events,
+ struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
+void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
-int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
-struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+
+int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_devlink_port *dl_port,
+ u32 controller, u32 sfnum);
+void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 controller, u32 sfnum);
-void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
-int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 controller, u32 sfnum);
-void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
@@ -788,6 +842,9 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);
+
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
if (mlx5_esw_allowed(esw))
@@ -809,6 +866,24 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
return esw->fdb_table.offloads.slow_fdb;
}
+int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport);
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -816,8 +891,9 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
-static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
+static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
@@ -866,6 +942,15 @@ static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}
+
+static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
+static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index e59380ee1ead..752fb0dfb111 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -375,7 +375,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- bool ignore_flow_lvl,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -385,8 +384,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
- if (ignore_flow_lvl)
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
@@ -424,10 +422,51 @@ esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 l
mlx5_chains_put_table(chains, chain, prio, level);
}
+static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
+{
+ return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
+}
+
+static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx)
+{
+ if (esw->offloads.ft_ipsec_tx_pol &&
+ esw_attr->dests[attr_idx].rep &&
+ esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
+ /* To be aligned with software, encryption is needed only for tunnel device */
+ (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
+ esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
+ esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
+ return true;
+
+ return false;
+}
+
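+/* Return false when the rule cannot use the IPsec TX policy table redirect:
+ * an uplink destination in the split list, or an uplink destination that
+ * shares the non-split list with other destinations.
+ */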
+static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *esw_attr)
+{
+ int i;
+
+ if (!esw->offloads.ft_ipsec_tx_pol)
+ return true;
+
+ for (i = 0; i < esw_attr->split_count; i++)
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
+ return false;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
+ (esw_attr->out_count - esw_attr->split_count > 1))
+ return false;
+
+ return true;
+}
+
static void
-esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
- struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
- int attr_idx, int dest_idx, bool pkt_reformat)
+esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
@@ -449,6 +488,33 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
}
}
+static void
+esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ if (pkt_reformat &&
+ esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+}
+
+static void
+esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
+ esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
+ attr_idx, dest_idx, pkt_reformat);
+ else
+ esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
+ attr_idx, dest_idx, pkt_reformat);
+}
+
static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
@@ -469,6 +535,28 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}
+static bool
+esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+{
+ bool vf_dest = false, pf_dest = false;
+ int i;
+
+ for (i = 0; i < max_dest; i++) {
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ continue;
+
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK)
+ pf_dest = true;
+ else
+ vf_dest = true;
+
+ if (vf_dest && pf_dest)
+ return true;
+ }
+
+ return false;
+}
+
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -501,7 +589,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
- err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
@@ -575,6 +663,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return ERR_PTR(-EOPNOTSUPP);
+ if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
+ return ERR_PTR(-EOPNOTSUPP);
+
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
@@ -602,6 +693,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
rule = ERR_PTR(err);
goto err_create_goto_table;
}
+
+ /* Header rewrite with combined wire+loopback in FDB is not allowed */
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
+ esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_warn(esw->dev,
+ "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ rule = ERR_PTR(-EINVAL);
+ goto err_esw_get;
+ }
}
if (esw_attr->decap_pkt_reformat)
@@ -884,6 +984,17 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
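+ /* When an IPsec TX policy table exists, steer send-to-uplink traffic
+  * through it instead of directly to the uplink vport.
+  */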
+ if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
+ dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ } else {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = rep->vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ }
+
if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
@@ -2390,7 +2501,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2414,7 +2525,7 @@ err_reps:
return err;
}
-void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2424,39 +2535,63 @@ void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return 0;
+
+ return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
+}
+
+void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
+}
+
+int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_devlink_port *dl_port,
+ u32 controller, u32 sfnum)
+{
+ return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
+}
+
+void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
+}
+
+int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int err;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
- if (vport_num != MLX5_VPORT_UPLINK) {
- err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
- if (err)
- return err;
- }
+ err = mlx5_esw_offloads_devlink_port_register(esw, vport);
+ if (err)
+ return err;
- err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ err = mlx5_esw_offloads_rep_load(esw, vport->vport);
if (err)
goto load_err;
return err;
load_err:
- if (vport_num != MLX5_VPORT_UPLINK)
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport);
return err;
}
-void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
- mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_offloads_rep_unload(esw, vport->vport);
- if (vport_num != MLX5_VPORT_UPLINK)
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport);
}
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
@@ -2810,7 +2945,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
void *event_data)
{
struct mlx5_eswitch *esw = my_data;
- struct mlx5_devcom *devcom = esw->dev->priv.devcom;
struct mlx5_eswitch *peer_esw = event_data;
u16 esw_i, peer_esw_i;
bool esw_paired;
@@ -2832,6 +2966,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
+
err = mlx5_esw_offloads_pair(esw, peer_esw);
if (err)
goto err_peer;
@@ -2850,7 +2985,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
esw->num_peers++;
peer_esw->num_peers++;
- mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+ mlx5_devcom_comp_set_ready(esw->devcom, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
@@ -2860,7 +2995,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
peer_esw->num_peers--;
esw->num_peers--;
if (!esw->num_peers && !peer_esw->num_peers)
- mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+ mlx5_devcom_comp_set_ready(esw->devcom, false);
xa_erase(&peer_esw->paired, esw_i);
xa_erase(&esw->paired, peer_esw_i);
mlx5_esw_offloads_unpair(peer_esw, esw);
@@ -2885,9 +3020,8 @@ err_out:
return err;
}
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
{
- struct mlx5_devcom *devcom = esw->dev->priv.devcom;
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
@@ -2897,38 +3031,44 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
- if (!mlx5_lag_is_supported(esw->dev))
+ if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
+ !mlx5_lag_is_supported(esw->dev))
return;
xa_init(&esw->paired);
- mlx5_devcom_register_component(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
- mlx5_esw_offloads_devcom_event,
- esw);
-
esw->num_peers = 0;
- mlx5_devcom_send_event(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
+ esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ key,
+ mlx5_esw_offloads_devcom_event,
+ esw);
+ if (IS_ERR_OR_NULL(esw->devcom))
+ return;
+
+ mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_PAIR,
- ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+ ESW_OFFLOADS_DEVCOM_UNPAIR,
+ esw);
}
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- struct mlx5_devcom *devcom = esw->dev->priv.devcom;
-
- if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
- return;
-
- if (!mlx5_lag_is_supported(esw->dev))
+ if (IS_ERR_OR_NULL(esw->devcom))
return;
- mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+ mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_UNPAIR,
- ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+ ESW_OFFLOADS_DEVCOM_UNPAIR,
+ esw);
- mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_unregister_component(esw->devcom);
xa_destroy(&esw->paired);
+ esw->devcom = NULL;
+}
+
+bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
+{
+ return mlx5_devcom_comp_is_ready(esw->devcom);
}
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
@@ -3355,7 +3495,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
/* Uplink vport rep must load first. */
- err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
+ err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
if (err)
goto err_uplink;
@@ -3366,7 +3506,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
return 0;
err_vports:
- esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+ mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
err_uplink:
esw_offloads_steering_cleanup(esw);
err_steering_init:
@@ -3404,7 +3544,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_eswitch_disable_pf_vf_vports(esw);
- esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+ mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mapping_destroy(esw->offloads.reg_c0_obj_pool);
@@ -3494,13 +3634,43 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
struct net *devl_net, *netdev_net;
struct mlx5_eswitch *esw;
- esw = mlx5_devlink_eswitch_get(devlink);
+ esw = mlx5_devlink_eswitch_nocheck_get(devlink);
netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
devl_net = devlink_net(devlink);
return net_eq(devl_net, netdev_net);
}
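+/* Hold off eswitch mode changes: mlx5_devlink_eswitch_mode_set() rejects a
+ * mode switch while offloads.num_block_mode is non-zero.
+ */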
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err;
+
+ if (!mlx5_esw_allowed(esw))
+ return 0;
+
+ /* Take TC into account */
+ err = mlx5_esw_try_lock(esw);
+ if (err < 0)
+ return err;
+
+ esw->offloads.num_block_mode++;
+ mlx5_esw_unlock(esw);
+ return 0;
+}
+
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ return;
+
+ down_write(&esw->mode_lock);
+ esw->offloads.num_block_mode--;
+ up_write(&esw->mode_lock);
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3534,6 +3704,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
+ if (esw->offloads.num_block_mode) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change eswitch mode when IPsec SA and/or policies are configured");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
@@ -3693,38 +3870,28 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- devl_unlock(devlink);
- /* Failure means no eswitch => not possible to change encap */
+ if (!mlx5_esw_allowed(esw))
return true;
- }
down_write(&esw->mode_lock);
if (esw->mode != MLX5_ESWITCH_LEGACY &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return false;
}
esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return true;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
@@ -3920,38 +4087,6 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 controller, u32 sfnum)
-{
- int err;
-
- err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
- return err;
-
- err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
- if (err)
- goto devlink_err;
-
- err = mlx5_esw_offloads_rep_load(esw, vport_num);
- if (err)
- goto rep_err;
- return 0;
-
-rep_err:
- mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
-devlink_err:
- mlx5_esw_vport_disable(esw, vport_num);
- return err;
-}
-
-void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
-{
- mlx5_esw_offloads_rep_unload(esw, vport_num);
- mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
- mlx5_esw_vport_disable(esw, vport_num);
-}
-
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -4040,35 +4175,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
-static bool
-is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num) ||
- mlx5_esw_is_sf_vport(esw, vport_num);
-}
-
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num))
- return -EOPNOTSUPP;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
ether_addr_copy(hw_addr, vport->info.mac);
@@ -4081,100 +4193,55 @@ int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
- return PTR_ERR(esw);
- }
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num)) {
- NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
- return -EINVAL;
- }
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
- return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
-}
-
-static struct mlx5_vport *
-mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
-{
- u16 vport_num;
-
- if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
- return ERR_PTR(-EOPNOTSUPP);
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num))
- return ERR_PTR(-EOPNOTSUPP);
-
- return mlx5_eswitch_get_vport(esw, vport_num);
+ return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
}
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
if (!MLX5_CAP_GEN(esw->dev, migration)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
- return err;
+ return -EOPNOTSUPP;
}
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
- if (vport->enabled) {
- *is_enabled = vport->info.mig_enabled;
- err = 0;
- }
+ *is_enabled = vport->info.mig_enabled;
mutex_unlock(&esw->state_lock);
- return err;
+ return 0;
}
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
void *query_ctx;
void *hca_caps;
- int err = -EOPNOTSUPP;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ int err;
if (!MLX5_CAP_GEN(esw->dev, migration)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
- return err;
+ return -EOPNOTSUPP;
}
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
- if (!vport->enabled) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
- goto out;
- }
if (vport->info.mig_enabled == enable) {
err = 0;
@@ -4216,56 +4283,37 @@ out:
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
- if (vport->enabled) {
- *is_enabled = vport->info.roce_enabled;
- err = 0;
- }
+ *is_enabled = vport->info.roce_enabled;
mutex_unlock(&esw->state_lock);
- return err;
+ return 0;
}
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
+ u16 vport_num = vport->vport;
void *query_ctx;
void *hca_caps;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ int err;
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
- vport_num = vport->vport;
mutex_lock(&esw->state_lock);
- if (!vport->enabled) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
- goto out;
- }
if (vport->info.roce_enabled == enable) {
err = 0;
@@ -4303,3 +4351,188 @@ out:
mutex_unlock(&esw->state_lock);
return err;
}
+
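+/* Swap a destination that was redirected to the IPsec TX policy table back
+ * to the plain uplink vport destination.
+ */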
+int
+mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
+{
+ struct mlx5_flow_destination new_dest = {};
+ struct mlx5_flow_destination old_dest = {};
+
+ if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
+ return 0;
+
+ esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+ esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+
+ return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
+}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_crypto_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec crypto");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_crypto_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
+ goto unlock;
+ }
+
+ vport->info.ipsec_crypto_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_packet_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
+ bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec packet mode");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_packet_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set IPsec packet mode");
+ goto unlock;
+ }
+
+ vport->info.ipsec_packet_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+#endif /* CONFIG_XFRM_OFFLOAD */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 12abe991583a..c4de6bf8d1b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -445,7 +445,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto err_cqwq;
}
- err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
+ err = mlx5_comp_eqn_get(mdev, smp_processor_id(), &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 39c03dcbd196..e5c1012921d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -57,7 +57,7 @@ static const char * const mlx5_fpga_qp_error_strings[] = {
};
static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void)
{
- struct mlx5_fpga_device *fdev = NULL;
+ struct mlx5_fpga_device *fdev;
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev)
@@ -252,7 +252,7 @@ out:
int mlx5_fpga_init(struct mlx5_core_dev *mdev)
{
- struct mlx5_fpga_device *fdev = NULL;
+ struct mlx5_fpga_device *fdev;
if (!MLX5_CAP_GEN(mdev, fpga)) {
mlx5_core_dbg(mdev, "FPGA capability not present\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 244cfd470903..a4b925331661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -975,6 +975,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
table_type = FS_FT_ESW_INGRESS_ACL;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
case MLX5_FLOW_NAMESPACE_RDMA_TX:
max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
table_type = FS_FT_RDMA_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6b069fa411c5..a13b9c2bd144 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -107,7 +107,7 @@
LEFTOVERS_NUM_PRIOS)
#define KERNEL_RX_MACSEC_NUM_PRIOS 1
-#define KERNEL_RX_MACSEC_NUM_LEVELS 2
+#define KERNEL_RX_MACSEC_NUM_LEVELS 3
#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1
@@ -224,6 +224,7 @@ static struct init_tree_node egress_root_fs = {
enum {
RDMA_RX_IPSEC_PRIO,
+ RDMA_RX_MACSEC_PRIO,
RDMA_RX_COUNTERS_PRIO,
RDMA_RX_BYPASS_PRIO,
RDMA_RX_KERNEL_PRIO,
@@ -237,9 +238,13 @@ enum {
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
+#define RDMA_RX_MACSEC_NUM_PRIOS 1
+#define RDMA_RX_MACSEC_PRIO_NUM_LEVELS 2
+#define RDMA_RX_MACSEC_MIN_LEVEL (RDMA_RX_COUNTERS_MIN_LEVEL + RDMA_RX_MACSEC_NUM_PRIOS)
+
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 4,
+ .ar_size = 5,
.children = (struct init_tree_node[]) {
[RDMA_RX_IPSEC_PRIO] =
ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
@@ -247,6 +252,12 @@ static struct init_tree_node rdma_rx_root_fs = {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
RDMA_RX_IPSEC_NUM_LEVELS))),
+ [RDMA_RX_MACSEC_PRIO] =
+ ADD_PRIO(0, RDMA_RX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_RX_MACSEC_NUM_PRIOS,
+ RDMA_RX_MACSEC_PRIO_NUM_LEVELS))),
[RDMA_RX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
@@ -270,6 +281,7 @@ static struct init_tree_node rdma_rx_root_fs = {
enum {
RDMA_TX_COUNTERS_PRIO,
RDMA_TX_IPSEC_PRIO,
+ RDMA_TX_MACSEC_PRIO,
RDMA_TX_BYPASS_PRIO,
};
@@ -280,9 +292,13 @@ enum {
#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
+#define RDMA_TX_MACSEC_NUM_PRIOS 1
+#define RDMA_TX_MACESC_PRIO_NUM_LEVELS 1
+#define RDMA_TX_MACSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_MACSEC_NUM_PRIOS)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 3,
+ .ar_size = 4,
.children = (struct init_tree_node[]) {
[RDMA_TX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
@@ -296,7 +312,12 @@ static struct init_tree_node rdma_tx_root_fs = {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),
-
+ [RDMA_TX_MACSEC_PRIO] =
+ ADD_PRIO(0, RDMA_TX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_TX_MACSEC_NUM_PRIOS,
+ RDMA_TX_MACESC_PRIO_NUM_LEVELS))),
[RDMA_TX_BYPASS_PRIO] =
ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
@@ -1122,7 +1143,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
}
for (i = 0; i < handle->num_rules; i++) {
- if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
+ if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
return _mlx5_modify_rule_destination(handle->rule[i],
new_dest);
}
@@ -2466,6 +2487,14 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
root_ns = steering->rdma_tx_root_ns;
prio = RDMA_TX_IPSEC_PRIO;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_MACSEC_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_MACSEC_PRIO;
+ break;
default: /* Must be NIC RX */
WARN_ON(!is_nic_rx_ns(type));
root_ns = steering->root_ns;
@@ -3050,6 +3079,12 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (err)
goto out_err;
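+ /* Create the FDB_CRYPTO_INGRESS priority with three flow table levels */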
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_INGRESS, 3);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
err = create_fdb_fast_path(steering);
if (err)
goto out_err;
@@ -3072,6 +3107,12 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
goto out_err;
}
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_EGRESS, 3);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
/* We put this priority last, knowing that nothing will get here
* unless explicitly forwarded to. This is possible because the
* slow path tables have catch all rules and nothing gets passed
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index fb2035a5ec99..58f4c0d0fafa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -143,90 +143,86 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
if (MLX5_CAP_GEN(dev, port_selection_cap)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_PORT_SELECTION, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_GENERAL_2, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ODP, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ATOMIC, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, roce)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ROCE, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, nic_flow_table) ||
MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_FLOW_TABLE, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_ESWITCH_MANAGER(dev)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, vector_calc)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ESWITCH, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, qos)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_QOS, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, debug))
- mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
+ mlx5_core_get_caps_mode(dev, MLX5_CAP_DEBUG, HCA_CAP_OPMOD_GET_CUR);
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
if (MLX5_CAP_GEN(dev, mcam_reg)) {
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
- mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
}
@@ -234,57 +230,52 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
mlx5_get_qcam_reg(dev);
if (MLX5_CAP_GEN(dev, device_memory)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_DEV_MEM, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, event_cap)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_DEV_EVENT, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_TLS, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN_64(dev, general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_VDPA_EMULATION, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, ipsec_offload)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_IPSEC, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, crypto)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_CRYPTO);
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, shampo)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_CRYPTO, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN_64(dev, general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_MACSEC, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, adv_virtualization)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_VIRTUALIZATION,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
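
Every converted call in this hunk passes HCA_CAP_OPMOD_GET_CUR, which suggests mlx5_core_get_caps_mode() simply exposes the query opmod that the old helper hard-coded. A minimal sketch of that assumed relationship (illustrative only, not the actual fw.c definition):

/* Assumed wrapper: the legacy helper as the "current values" special case of
 * the new _mode variant. Not taken from the mlx5 sources.
 */
static inline int mlx5_core_get_caps(struct mlx5_core_dev *dev,
				     enum mlx5_cap_type cap_type)
{
	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
}
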
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 4804990b7f22..e87766f91150 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -127,17 +127,23 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
goto out;
+ if (!reset_state)
+ return 0;
+
switch (reset_state) {
case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
- NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset still in progress");
return -EBUSY;
- case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
- NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout");
+ case MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset negotiation timeout");
return -ETIMEDOUT;
case MLX5_MFRL_REG_RESET_STATE_NACK:
NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
return -EPERM;
+ case MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset unload timeout");
+ return -ETIMEDOUT;
}
out:
@@ -151,7 +157,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
- int err;
+ int err, rst_res;
set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
@@ -164,13 +170,34 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
return 0;
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
- return mlx5_fw_reset_get_reset_state_err(dev, extack);
+ if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state)) {
+ rst_res = mlx5_fw_reset_get_reset_state_err(dev, extack);
+ return rst_res ? rst_res : err;
+ }
NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
return mlx5_cmd_check(dev, err, in, out);
}
+int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
+ struct netlink_ext_ack *extack)
+{
+ u8 rst_state;
+ int err;
+
+ err = mlx5_fw_reset_get_reset_state_err(dev, extack);
+ if (err)
+ return err;
+
+ rst_state = mlx5_get_fw_rst_state(dev);
+ if (!rst_state)
+ return 0;
+
+ mlx5_core_err(dev, "Sync reset did not complete, state=%d\n", rst_state);
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset did not complete successfully");
+ return rst_state;
+}
+
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
{
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
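
The new mlx5_fw_reset_verify_fw_complete() pairs the MFRL reset_state decoding with a check of the cached firmware reset state, returning 0 only when both indicate the flow finished. A hedged sketch of how a caller (presumably the fw_activate/reload path) could use it; the function name below is made up:

/* Hypothetical caller: wait for the sync reset, then confirm firmware
 * completion. mlx5_example_fw_activate_done() is not a real mlx5 symbol.
 */
static int mlx5_example_fw_activate_done(struct mlx5_core_dev *dev,
					 struct netlink_ext_ack *extack)
{
	int err = mlx5_fw_reset_wait_reset_done(dev);

	if (err)
		return err;
	/* 0 on success, a negative errno from the reset_state decoding, or the
	 * residual non-zero reset state otherwise.
	 */
	return mlx5_fw_reset_verify_fw_complete(dev, extack);
}
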
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index c57465595f7c..ea527d06a85f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -12,6 +12,8 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
+ struct netlink_ext_ack *extack);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 187cb2c464f8..2fb2598b775e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -50,20 +50,6 @@ enum {
};
enum {
- MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
- MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
- MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8,
- MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
- MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
- MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
- MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
- MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
- MLX5_HEALTH_SYNDR_EQ_INV = 0xe,
- MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
- MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
-};
-
-enum {
MLX5_DROP_HEALTH_WORK,
};
@@ -357,27 +343,27 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
static const char *hsynd_str(u8 synd)
{
switch (synd) {
- case MLX5_HEALTH_SYNDR_FW_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR:
return "firmware internal error";
- case MLX5_HEALTH_SYNDR_IRISC_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC:
return "irisc not responding";
- case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR:
return "unrecoverable hardware error";
- case MLX5_HEALTH_SYNDR_CRC_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR:
return "firmware CRC error";
- case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR:
return "ICM fetch PCI error";
- case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR:
return "HW fatal error\n";
- case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN:
return "async EQ buffer overrun";
- case MLX5_HEALTH_SYNDR_EQ_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR:
return "EQ error";
- case MLX5_HEALTH_SYNDR_EQ_INV:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV:
return "Invalid EQ referenced";
- case MLX5_HEALTH_SYNDR_FFSER_ERR:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR:
return "FFSER error";
- case MLX5_HEALTH_SYNDR_HIGH_TEMP:
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR:
return "High temperature";
default:
return "unrecognized error";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
new file mode 100644
index 000000000000..353f81dccd1c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+
+#include <linux/hwmon.h>
+#include <linux/bitmap.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/port.h>
+#include "mlx5_core.h"
+#include "hwmon.h"
+
+#define CHANNELS_TYPE_NUM 2 /* chip channel and temp channel */
+#define CHIP_CONFIG_NUM 1
+
+/* module 0 is mapped to sensor_index 64 in MTMP register */
+#define to_mtmp_module_sensor_idx(idx) (64 + (idx))
+
+/* All temperatures are retrieved in units of 0.125 C. The hwmon framework
+ * expects them in units of millidegrees C, hence the multiplication by 125.
+ */
+#define mtmp_temp_to_mdeg(temp) ((temp) * 125)
+
+struct temp_channel_desc {
+ u32 sensor_index;
+ char sensor_name[32];
+};
+
+/* chip_channel_config and channel_info arrays must be 0-terminated, hence + 1 */
+struct mlx5_hwmon {
+ struct mlx5_core_dev *mdev;
+ struct device *hwmon_dev;
+ struct hwmon_channel_info chip_info;
+ u32 chip_channel_config[CHIP_CONFIG_NUM + 1];
+ struct hwmon_channel_info temp_info;
+ u32 *temp_channel_config;
+ const struct hwmon_channel_info *channel_info[CHANNELS_TYPE_NUM + 1];
+ struct hwmon_chip_info chip;
+ struct temp_channel_desc *temp_channel_desc;
+ u32 asic_platform_scount;
+ u32 module_scount;
+};
+
+static int mlx5_hwmon_query_mtmp(struct mlx5_core_dev *mdev, u32 sensor_index, u32 *mtmp_out)
+{
+ u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+
+ MLX5_SET(mtmp_reg, mtmp_in, sensor_index, sensor_index);
+
+ return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
+ mtmp_out, MLX5_ST_SZ_BYTES(mtmp_reg),
+ MLX5_REG_MTMP, 0, 0);
+}
+
+static int mlx5_hwmon_reset_max_temp(struct mlx5_core_dev *mdev, int sensor_index)
+{
+ u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+ u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+
+ MLX5_SET(mtmp_reg, mtmp_in, sensor_index, sensor_index);
+ MLX5_SET(mtmp_reg, mtmp_in, mtr, 1);
+
+ return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
+ mtmp_out, sizeof(mtmp_out),
+ MLX5_REG_MTMP, 0, 0);
+}
+
+static int mlx5_hwmon_enable_max_temp(struct mlx5_core_dev *mdev, int sensor_index)
+{
+ u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+ u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+ int err;
+
+ err = mlx5_hwmon_query_mtmp(mdev, sensor_index, mtmp_in);
+ if (err)
+ return err;
+
+ MLX5_SET(mtmp_reg, mtmp_in, mte, 1);
+ return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
+ mtmp_out, sizeof(mtmp_out),
+ MLX5_REG_MTMP, 0, 1);
+}
+
+static int mlx5_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct mlx5_hwmon *hwmon = dev_get_drvdata(dev);
+ u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+ int err;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ err = mlx5_hwmon_query_mtmp(hwmon->mdev, hwmon->temp_channel_desc[channel].sensor_index,
+ mtmp_out);
+ if (err)
+ return err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, temperature));
+ return 0;
+ case hwmon_temp_highest:
+ *val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, max_temperature));
+ return 0;
+ case hwmon_temp_crit:
+ *val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, temp_threshold_hi));
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct mlx5_hwmon *hwmon = dev_get_drvdata(dev);
+
+ if (type != hwmon_temp || attr != hwmon_temp_reset_history)
+ return -EOPNOTSUPP;
+
+ return mlx5_hwmon_reset_max_temp(hwmon->mdev,
+ hwmon->temp_channel_desc[channel].sensor_index);
+}
+
+static umode_t mlx5_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_highest:
+ case hwmon_temp_crit:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_reset_history:
+ return 0200;
+ default:
+ return 0;
+ }
+}
+
+static int mlx5_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct mlx5_hwmon *hwmon = dev_get_drvdata(dev);
+
+ if (type != hwmon_temp || attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = (const char *)hwmon->temp_channel_desc[channel].sensor_name;
+ return 0;
+}
+
+static const struct hwmon_ops mlx5_hwmon_ops = {
+ .read = mlx5_hwmon_read,
+ .read_string = mlx5_hwmon_read_string,
+ .is_visible = mlx5_hwmon_is_visible,
+ .write = mlx5_hwmon_write,
+};
+
+static int mlx5_hwmon_init_channels_names(struct mlx5_hwmon *hwmon)
+{
+ u32 i;
+
+ for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++) {
+ u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
+ char *sensor_name;
+ int err;
+
+ err = mlx5_hwmon_query_mtmp(hwmon->mdev, hwmon->temp_channel_desc[i].sensor_index,
+ mtmp_out);
+ if (err)
+ return err;
+
+ sensor_name = MLX5_ADDR_OF(mtmp_reg, mtmp_out, sensor_name_hi);
+ if (!*sensor_name) {
+ snprintf(hwmon->temp_channel_desc[i].sensor_name,
+ sizeof(hwmon->temp_channel_desc[i].sensor_name), "sensor%u",
+ hwmon->temp_channel_desc[i].sensor_index);
+ continue;
+ }
+
+ memcpy(&hwmon->temp_channel_desc[i].sensor_name, sensor_name,
+ MLX5_FLD_SZ_BYTES(mtmp_reg, sensor_name_hi) +
+ MLX5_FLD_SZ_BYTES(mtmp_reg, sensor_name_lo));
+ }
+
+ return 0;
+}
+
+static int mlx5_hwmon_get_module_sensor_index(struct mlx5_core_dev *mdev, u32 *module_index)
+{
+ int module_num;
+ int err;
+
+ err = mlx5_query_module_num(mdev, &module_num);
+ if (err)
+ return err;
+
+ *module_index = to_mtmp_module_sensor_idx(module_num);
+
+ return 0;
+}
+
+static int mlx5_hwmon_init_sensors_indexes(struct mlx5_hwmon *hwmon, u64 sensor_map)
+{
+ DECLARE_BITMAP(smap, BITS_PER_TYPE(sensor_map));
+ unsigned long bit_pos;
+ int err = 0;
+ int i = 0;
+
+ bitmap_from_u64(smap, sensor_map);
+
+ for_each_set_bit(bit_pos, smap, BITS_PER_TYPE(sensor_map)) {
+ hwmon->temp_channel_desc[i].sensor_index = bit_pos;
+ i++;
+ }
+
+ if (hwmon->module_scount)
+ err = mlx5_hwmon_get_module_sensor_index(hwmon->mdev,
+ &hwmon->temp_channel_desc[i].sensor_index);
+
+ return err;
+}
+
+static void mlx5_hwmon_channel_info_init(struct mlx5_hwmon *hwmon)
+{
+ int i;
+
+ hwmon->channel_info[0] = &hwmon->chip_info;
+ hwmon->channel_info[1] = &hwmon->temp_info;
+
+ hwmon->chip_channel_config[0] = HWMON_C_REGISTER_TZ;
+ hwmon->chip_info.config = (const u32 *)hwmon->chip_channel_config;
+ hwmon->chip_info.type = hwmon_chip;
+
+ for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++)
+ hwmon->temp_channel_config[i] = HWMON_T_INPUT | HWMON_T_HIGHEST | HWMON_T_CRIT |
+ HWMON_T_RESET_HISTORY | HWMON_T_LABEL;
+
+ hwmon->temp_info.config = (const u32 *)hwmon->temp_channel_config;
+ hwmon->temp_info.type = hwmon_temp;
+}
+
+static int mlx5_hwmon_is_module_mon_cap(struct mlx5_core_dev *mdev, bool *mon_cap)
+{
+ u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)];
+ u32 module_index;
+ int err;
+
+ err = mlx5_hwmon_get_module_sensor_index(mdev, &module_index);
+ if (err)
+ return err;
+
+ err = mlx5_hwmon_query_mtmp(mdev, module_index, mtmp_out);
+ if (err)
+ return err;
+
+ if (MLX5_GET(mtmp_reg, mtmp_out, temperature))
+ *mon_cap = true;
+
+ return 0;
+}
+
+static int mlx5_hwmon_get_sensors_count(struct mlx5_core_dev *mdev, u32 *asic_platform_scount)
+{
+ u32 mtcap_out[MLX5_ST_SZ_DW(mtcap_reg)] = {};
+ u32 mtcap_in[MLX5_ST_SZ_DW(mtcap_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, mtcap_in, sizeof(mtcap_in),
+ mtcap_out, sizeof(mtcap_out),
+ MLX5_REG_MTCAP, 0, 0);
+ if (err)
+ return err;
+
+ *asic_platform_scount = MLX5_GET(mtcap_reg, mtcap_out, sensor_count);
+
+ return 0;
+}
+
+static void mlx5_hwmon_free(struct mlx5_hwmon *hwmon)
+{
+ if (!hwmon)
+ return;
+
+ kfree(hwmon->temp_channel_config);
+ kfree(hwmon->temp_channel_desc);
+ kfree(hwmon);
+}
+
+static struct mlx5_hwmon *mlx5_hwmon_alloc(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_hwmon *hwmon;
+ bool mon_cap = false;
+ u32 sensors_count;
+ int err;
+
+ hwmon = kzalloc(sizeof(*mdev->hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_hwmon_get_sensors_count(mdev, &hwmon->asic_platform_scount);
+ if (err)
+ goto err_free_hwmon;
+
+ /* check if module sensor has thermal mon cap. if yes, allocate channel desc for it */
+ err = mlx5_hwmon_is_module_mon_cap(mdev, &mon_cap);
+ if (err)
+ goto err_free_hwmon;
+
+ hwmon->module_scount = mon_cap ? 1 : 0;
+ sensors_count = hwmon->asic_platform_scount + hwmon->module_scount;
+ hwmon->temp_channel_desc = kcalloc(sensors_count, sizeof(*hwmon->temp_channel_desc),
+ GFP_KERNEL);
+ if (!hwmon->temp_channel_desc) {
+ err = -ENOMEM;
+ goto err_free_hwmon;
+ }
+
+	/* sensors configuration values array, must be 0-terminated, hence + 1 */
+ hwmon->temp_channel_config = kcalloc(sensors_count + 1, sizeof(*hwmon->temp_channel_config),
+ GFP_KERNEL);
+ if (!hwmon->temp_channel_config) {
+ err = -ENOMEM;
+ goto err_free_temp_channel_desc;
+ }
+
+ hwmon->mdev = mdev;
+
+ return hwmon;
+
+err_free_temp_channel_desc:
+ kfree(hwmon->temp_channel_desc);
+err_free_hwmon:
+ kfree(hwmon);
+ return ERR_PTR(err);
+}
+
+static int mlx5_hwmon_dev_init(struct mlx5_hwmon *hwmon)
+{
+ u32 mtcap_out[MLX5_ST_SZ_DW(mtcap_reg)] = {};
+ u32 mtcap_in[MLX5_ST_SZ_DW(mtcap_reg)] = {};
+ int err;
+ int i;
+
+ err = mlx5_core_access_reg(hwmon->mdev, mtcap_in, sizeof(mtcap_in),
+ mtcap_out, sizeof(mtcap_out),
+ MLX5_REG_MTCAP, 0, 0);
+ if (err)
+ return err;
+
+ mlx5_hwmon_channel_info_init(hwmon);
+ mlx5_hwmon_init_sensors_indexes(hwmon, MLX5_GET64(mtcap_reg, mtcap_out, sensor_map));
+ err = mlx5_hwmon_init_channels_names(hwmon);
+ if (err)
+ return err;
+
+ for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++) {
+ err = mlx5_hwmon_enable_max_temp(hwmon->mdev,
+ hwmon->temp_channel_desc[i].sensor_index);
+ if (err)
+ return err;
+ }
+
+ hwmon->chip.ops = &mlx5_hwmon_ops;
+ hwmon->chip.info = (const struct hwmon_channel_info **)hwmon->channel_info;
+
+ return 0;
+}
+
+int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
+{
+ struct device *dev = mdev->device;
+ struct mlx5_hwmon *hwmon;
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG(mdev, mtmp))
+ return 0;
+
+ hwmon = mlx5_hwmon_alloc(mdev);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+ err = mlx5_hwmon_dev_init(hwmon);
+ if (err)
+ goto err_free_hwmon;
+
+ hwmon->hwmon_dev = hwmon_device_register_with_info(dev, "mlx5",
+ hwmon,
+ &hwmon->chip,
+ NULL);
+ if (IS_ERR(hwmon->hwmon_dev)) {
+ err = PTR_ERR(hwmon->hwmon_dev);
+ goto err_free_hwmon;
+ }
+
+ mdev->hwmon = hwmon;
+ return 0;
+
+err_free_hwmon:
+ mlx5_hwmon_free(hwmon);
+ return err;
+}
+
+void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_hwmon *hwmon = mdev->hwmon;
+
+ if (!hwmon)
+ return;
+
+ hwmon_device_unregister(hwmon->hwmon_dev);
+ mlx5_hwmon_free(hwmon);
+ mdev->hwmon = NULL;
+}
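
Two constants in this file deserve a worked example: MTMP reports temperatures in 0.125 degC units while hwmon wants millidegrees C (hence the multiply by 125), and module sensors start at MTMP sensor_index 64. A standalone sketch of the same arithmetic, independent of the driver:

#include <assert.h>
#include <stdio.h>

/* Same math as the driver macros mtmp_temp_to_mdeg() and
 * to_mtmp_module_sensor_idx(), shown outside the kernel for clarity.
 */
static long mtmp_to_millideg(long temp)
{
	return temp * 125;	/* 0.125 degC units -> millidegrees C */
}

static unsigned int module_sensor_index(unsigned int module)
{
	return 64 + module;	/* module 0 lives at MTMP sensor_index 64 */
}

int main(void)
{
	assert(mtmp_to_millideg(8) == 1000);	/* 8 * 0.125 degC = 1 degC */
	printf("MTMP 360 -> %ld mdegC, module 2 -> sensor %u\n",
	       mtmp_to_millideg(360), module_sensor_index(2));	/* 45000, 66 */
	return 0;
}
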
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
new file mode 100644
index 000000000000..999654a9b9da
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#ifndef __MLX5_HWMON_H__
+#define __MLX5_HWMON_H__
+
+#include <linux/mlx5/driver.h>
+
+#if IS_ENABLED(CONFIG_HWMON)
+
+int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev);
+void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev);
+
+#else
+static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
+{
+ return 0;
+}
+
+static inline void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev) {}
+
+#endif
+
+#endif /* __MLX5_HWMON_H__ */
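
Because the header provides no-op stubs when CONFIG_HWMON is disabled, callers need no ifdefs of their own. A hypothetical call-site sketch (the example_* names are made up):

/* Hypothetical probe/remove hooks; safe to call unconditionally since
 * mlx5_hwmon_dev_register() returns 0 when MTMP is unsupported or HWMON=n,
 * and unregister is a no-op when nothing was registered.
 */
static int mlx5_example_probe_hwmon(struct mlx5_core_dev *mdev)
{
	return mlx5_hwmon_dev_register(mdev);
}

static void mlx5_example_remove_hwmon(struct mlx5_core_dev *mdev)
{
	mlx5_hwmon_dev_unregister(mdev);
}
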
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index fa467335526e..047d5fed5f89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -156,67 +156,57 @@ unlock:
return least_loaded_irq;
}
-void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
- int num_irqs)
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
- int i;
-
- for (i = 0; i < num_irqs; i++) {
- int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i]));
+ int cpu;
- synchronize_irq(pci_irq_vector(pool->dev->pdev,
- mlx5_irq_get_index(irqs[i])));
- if (mlx5_irq_put(irqs[i]))
- if (pool->irqs_per_cpu)
- cpu_put(pool, cpu);
- }
+ cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
+ synchronize_irq(pci_irq_vector(pool->dev->pdev,
+ mlx5_irq_get_index(irq)));
+ if (mlx5_irq_put(irq))
+ if (pool->irqs_per_cpu)
+ cpu_put(pool, cpu);
}
/**
- * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQs.
- * @nirqs: number of IRQs to request.
- * @irqs: an output array of IRQs pointers.
+ * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @used_cpus: cpumask of CPUs already bound by the device
+ * @vecidx: vector index to request an IRQ for.
*
* Each IRQ is bounded to at most 1 CPU.
- * This function is requesting IRQs according to the default assignment.
+ * This function is requesting an IRQ according to the default assignment.
* The default assignment policy is:
- * - in each iteration, request the least loaded IRQ which is not bound to any
+ * - request the least loaded IRQ which is not bound to any
* CPU of the previous IRQs requested.
*
- * This function returns the number of IRQs requested, (which might be smaller than
- * @nirqs), if successful, or a negative error code in case of an error.
+ * On success, this function updates used_cpus mask and returns an irq pointer.
+ * In case of an error, an appropriate error pointer is returned.
*/
-int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs)
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx)
{
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
- int i = 0;
+
+ if (!mlx5_irq_pool_is_sf_pool(pool))
+ return ERR_PTR(-ENOENT);
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
- for (i = 0; i < nirqs; i++) {
- if (mlx5_irq_pool_is_sf_pool(pool))
- irq = mlx5_irq_affinity_request(pool, &af_desc);
- else
- /* In case SF pool doesn't exists, fallback to the PF IRQs.
- * The PF IRQs are already allocated and binded to CPU
- * at this point. Hence, only an index is needed.
- */
- irq = mlx5_irq_request(dev, i, NULL, NULL);
- if (IS_ERR(irq))
- break;
- irqs[i] = irq;
- cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), &af_desc.mask);
- mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
- pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
- cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
- mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
- }
- if (!i)
- return PTR_ERR(irq);
- return i;
+ cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
+ irq = mlx5_irq_affinity_request(pool, &af_desc);
+
+ if (IS_ERR(irq))
+ return irq;
+
+ cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+
+ return irq;
}
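
The reworked API moves the "avoid CPUs already used" bookkeeping to the caller: every successful request ORs the returned IRQ's affinity into used_cpus, and the next request is built from the complement of that mask. A simplified, self-contained model of that accumulation (plain bitmasks stand in for cpumasks; nothing mlx5-specific):

#include <stdint.h>
#include <stdio.h>

/* Toy model: each candidate IRQ owns one CPU bit. Pick the first candidate
 * whose CPU is not yet in *used_cpus, then reserve it -- mirroring the
 * cpumask_andnot()/cpumask_or() dance around the real request helper.
 */
static int request_irq_avoiding(uint64_t *used_cpus, const uint64_t *irq_cpu, int nirqs)
{
	for (int i = 0; i < nirqs; i++) {
		if (irq_cpu[i] & *used_cpus)
			continue;		/* CPU taken by a previous vector */
		*used_cpus |= irq_cpu[i];	/* reserve it for this vector */
		return i;
	}
	return -1;				/* no free CPU left */
}

int main(void)
{
	const uint64_t irq_cpu[] = { 1ull << 0, 1ull << 0, 1ull << 1 };
	uint64_t used = 0;

	printf("%d ", request_irq_avoiding(&used, irq_cpu, 3));	/* 0 */
	printf("%d\n", request_irq_avoiding(&used, irq_cpu, 3));	/* 2: cpu0 is used */
	return 0;
}
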
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index f0a074b2fcdf..af3fac090b82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -835,7 +835,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
dev = ldev->pf[MLX5_LAG_P1].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
- mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+ mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
return true;
@@ -1268,16 +1268,6 @@ recheck:
mlx5_ldev_put(ldev);
}
-bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
-{
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
- MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
- return false;
- return true;
-}
-
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index a061b1873e27..481e92f39fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -74,8 +74,6 @@ struct mlx5_lag {
struct lag_mpesw lag_mpesw;
};
-bool mlx5_lag_is_supported(struct mlx5_core_dev *dev);
-
static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev *dev)
{
@@ -115,4 +113,14 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
+static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+ return false;
+ return true;
+}
+
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 5a80fb7dbbca..40c7be124041 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -81,7 +81,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
int inlen, eqn;
int err;
- err = mlx5_vector2eqn(mdev, 0, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 78c94b22bdc0..00e67910e3ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -2,214 +2,274 @@
/* Copyright (c) 2018 Mellanox Technologies */
#include <linux/mlx5/vport.h>
+#include <linux/list.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
-static LIST_HEAD(devcom_list);
+static LIST_HEAD(devcom_dev_list);
+static LIST_HEAD(devcom_comp_list);
+/* protect device list */
+static DEFINE_MUTEX(dev_list_lock);
+/* protect component list */
+static DEFINE_MUTEX(comp_list_lock);
-#define devcom_for_each_component(priv, comp, iter) \
- for (iter = 0; \
- comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \
- iter++)
+#define devcom_for_each_component(iter) \
+ list_for_each_entry(iter, &devcom_comp_list, comp_list)
-struct mlx5_devcom_component {
- struct {
- void __rcu *data;
- } device[MLX5_DEVCOM_PORTS_SUPPORTED];
+struct mlx5_devcom_dev {
+ struct list_head list;
+ struct mlx5_core_dev *dev;
+ struct kref ref;
+};
+struct mlx5_devcom_comp {
+ struct list_head comp_list;
+ enum mlx5_devcom_component id;
+ u64 key;
+ struct list_head comp_dev_list_head;
mlx5_devcom_event_handler_t handler;
- struct rw_semaphore sem;
+ struct kref ref;
bool ready;
+ struct rw_semaphore sem;
};
-struct mlx5_devcom_list {
+struct mlx5_devcom_comp_dev {
struct list_head list;
-
- struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
- struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED];
+ struct mlx5_devcom_comp *comp;
+ struct mlx5_devcom_dev *devc;
+ void __rcu *data;
};
-struct mlx5_devcom {
- struct mlx5_devcom_list *priv;
- int idx;
-};
-
-static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void)
+static bool devcom_dev_exists(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_component *comp;
- struct mlx5_devcom_list *priv;
- int i;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return NULL;
+ struct mlx5_devcom_dev *iter;
- devcom_for_each_component(priv, comp, i)
- init_rwsem(&comp->sem);
+ list_for_each_entry(iter, &devcom_dev_list, list)
+ if (iter->dev == dev)
+ return true;
- return priv;
+ return false;
}
-static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv,
- u8 idx)
+static struct mlx5_devcom_dev *
+mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom *devcom;
+ struct mlx5_devcom_dev *devc;
- devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
- if (!devcom)
+ devc = kzalloc(sizeof(*devc), GFP_KERNEL);
+ if (!devc)
return NULL;
- devcom->priv = priv;
- devcom->idx = idx;
- return devcom;
+ devc->dev = dev;
+ kref_init(&devc->ref);
+ return devc;
}
-/* Must be called with intf_mutex held */
-struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+struct mlx5_devcom_dev *
+mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_list *priv = NULL, *iter;
- struct mlx5_devcom *devcom = NULL;
- bool new_priv = false;
- u64 sguid0, sguid1;
- int idx, i;
-
- if (!mlx5_core_is_pf(dev))
- return NULL;
- if (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_DEVCOM_PORTS_SUPPORTED)
- return NULL;
-
- mlx5_dev_list_lock();
- sguid0 = mlx5_query_nic_system_image_guid(dev);
- list_for_each_entry(iter, &devcom_list, list) {
- /* There is at least one device in iter */
- struct mlx5_core_dev *tmp_dev;
-
- idx = -1;
- for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
- if (iter->devs[i])
- tmp_dev = iter->devs[i];
- else
- idx = i;
- }
-
- if (idx == -1)
- continue;
-
- sguid1 = mlx5_query_nic_system_image_guid(tmp_dev);
- if (sguid0 != sguid1)
- continue;
-
- priv = iter;
- break;
- }
+ struct mlx5_devcom_dev *devc;
- if (!priv) {
- priv = mlx5_devcom_list_alloc();
- if (!priv) {
- devcom = ERR_PTR(-ENOMEM);
- goto out;
- }
+ mutex_lock(&dev_list_lock);
- idx = 0;
- new_priv = true;
+ if (devcom_dev_exists(dev)) {
+ devc = ERR_PTR(-EEXIST);
+ goto out;
}
- priv->devs[idx] = dev;
- devcom = mlx5_devcom_alloc(priv, idx);
- if (!devcom) {
- if (new_priv)
- kfree(priv);
- devcom = ERR_PTR(-ENOMEM);
+ devc = mlx5_devcom_dev_alloc(dev);
+ if (!devc) {
+ devc = ERR_PTR(-ENOMEM);
goto out;
}
- if (new_priv)
- list_add(&priv->list, &devcom_list);
+ list_add_tail(&devc->list, &devcom_dev_list);
out:
- mlx5_dev_list_unlock();
- return devcom;
+ mutex_unlock(&dev_list_lock);
+ return devc;
}
-/* Must be called with intf_mutex held */
-void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
+static void
+mlx5_devcom_dev_release(struct kref *ref)
{
- struct mlx5_devcom_list *priv;
- int i;
+ struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref);
- if (IS_ERR_OR_NULL(devcom))
- return;
+ mutex_lock(&dev_list_lock);
+ list_del(&devc->list);
+ mutex_unlock(&dev_list_lock);
+ kfree(devc);
+}
- mlx5_dev_list_lock();
- priv = devcom->priv;
- priv->devs[devcom->idx] = NULL;
+void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
+{
+ if (!IS_ERR_OR_NULL(devc))
+ kref_put(&devc->ref, mlx5_devcom_dev_release);
+}
- kfree(devcom);
+static struct mlx5_devcom_comp *
+mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
+{
+ struct mlx5_devcom_comp *comp;
- for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
- if (priv->devs[i])
- break;
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return ERR_PTR(-ENOMEM);
- if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
- goto out;
+ comp->id = id;
+ comp->key = key;
+ comp->handler = handler;
+ init_rwsem(&comp->sem);
+ kref_init(&comp->ref);
+ INIT_LIST_HEAD(&comp->comp_dev_list_head);
- list_del(&priv->list);
- kfree(priv);
-out:
- mlx5_dev_list_unlock();
+ return comp;
}
-void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- mlx5_devcom_event_handler_t handler,
- void *data)
+static void
+mlx5_devcom_comp_release(struct kref *ref)
{
- struct mlx5_devcom_component *comp;
+ struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref);
- if (IS_ERR_OR_NULL(devcom))
- return;
+ mutex_lock(&comp_list_lock);
+ list_del(&comp->comp_list);
+ mutex_unlock(&comp_list_lock);
+ kfree(comp);
+}
+
+static struct mlx5_devcom_comp_dev *
+devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
+ struct mlx5_devcom_comp *comp,
+ void *data)
+{
+ struct mlx5_devcom_comp_dev *devcom;
- WARN_ON(!data);
+ devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
+ if (!devcom)
+ return ERR_PTR(-ENOMEM);
+
+ kref_get(&devc->ref);
+ devcom->devc = devc;
+ devcom->comp = comp;
+ rcu_assign_pointer(devcom->data, data);
- comp = &devcom->priv->components[id];
down_write(&comp->sem);
- comp->handler = handler;
- rcu_assign_pointer(comp->device[devcom->idx].data, data);
+ list_add_tail(&devcom->list, &comp->comp_dev_list_head);
up_write(&comp->sem);
+
+ return devcom;
}
-void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id)
+static void
+devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
{
- struct mlx5_devcom_component *comp;
-
- if (IS_ERR_OR_NULL(devcom))
- return;
+ struct mlx5_devcom_comp *comp = devcom->comp;
- comp = &devcom->priv->components[id];
down_write(&comp->sem);
- RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
+ list_del(&devcom->list);
up_write(&comp->sem);
- synchronize_rcu();
+
+ kref_put(&devcom->devc->ref, mlx5_devcom_dev_release);
+ kfree(devcom);
+ kref_put(&comp->ref, mlx5_devcom_comp_release);
}
-int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
+static bool
+devcom_component_equal(struct mlx5_devcom_comp *devcom,
+ enum mlx5_devcom_component id,
+ u64 key)
+{
+ return devcom->id == id && devcom->key == key;
+}
+
+static struct mlx5_devcom_comp *
+devcom_component_get(struct mlx5_devcom_dev *devc,
+ enum mlx5_devcom_component id,
+ u64 key,
+ mlx5_devcom_event_handler_t handler)
+{
+ struct mlx5_devcom_comp *comp;
+
+ devcom_for_each_component(comp) {
+ if (devcom_component_equal(comp, id, key)) {
+ if (handler == comp->handler) {
+ kref_get(&comp->ref);
+ return comp;
+ }
+
+ mlx5_core_err(devc->dev,
+ "Cannot register existing devcom component with different handler\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return NULL;
+}
+
+struct mlx5_devcom_comp_dev *
+mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
+ enum mlx5_devcom_component id,
+ u64 key,
+ mlx5_devcom_event_handler_t handler,
+ void *data)
+{
+ struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp *comp;
+
+ if (IS_ERR_OR_NULL(devc))
+ return NULL;
+
+ mutex_lock(&comp_list_lock);
+ comp = devcom_component_get(devc, id, key, handler);
+ if (IS_ERR(comp)) {
+ devcom = ERR_PTR(-EINVAL);
+ goto out_unlock;
+ }
+
+ if (!comp) {
+ comp = mlx5_devcom_comp_alloc(id, key, handler);
+ if (IS_ERR(comp)) {
+ devcom = ERR_CAST(comp);
+ goto out_unlock;
+ }
+ list_add_tail(&comp->comp_list, &devcom_comp_list);
+ }
+ mutex_unlock(&comp_list_lock);
+
+ devcom = devcom_alloc_comp_dev(devc, comp, data);
+ if (IS_ERR(devcom))
+ kref_put(&comp->ref, mlx5_devcom_comp_release);
+
+ return devcom;
+
+out_unlock:
+ mutex_unlock(&comp_list_lock);
+ return devcom;
+}
+
+void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (!IS_ERR_OR_NULL(devcom))
+ devcom_free_comp_dev(devcom);
+}
+
+int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data)
{
- struct mlx5_devcom_component *comp;
- int err = -ENODEV, i;
+ struct mlx5_devcom_comp_dev *pos;
+ struct mlx5_devcom_comp *comp;
+ int err = 0;
+ void *data;
if (IS_ERR_OR_NULL(devcom))
- return err;
+ return -ENODEV;
- comp = &devcom->priv->components[id];
+ comp = devcom->comp;
down_write(&comp->sem);
- for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
- void *data = rcu_dereference_protected(comp->device[i].data,
- lockdep_is_held(&comp->sem));
+ list_for_each_entry(pos, &comp->comp_dev_list_head, list) {
+ data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));
- if (i != devcom->idx && data) {
+ if (pos != devcom && data) {
err = comp->handler(event, data, event_data);
if (err)
goto rollback;
@@ -220,48 +280,43 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
return 0;
rollback:
- while (i--) {
- void *data = rcu_dereference_protected(comp->device[i].data,
- lockdep_is_held(&comp->sem));
+ if (list_entry_is_head(pos, &comp->comp_dev_list_head, list))
+ goto out;
+ pos = list_prev_entry(pos, list);
+ list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) {
+ data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));
- if (i != devcom->idx && data)
+ if (pos != devcom && data)
comp->handler(rollback_event, data, event_data);
}
-
+out:
up_write(&comp->sem);
return err;
}
-void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- bool ready)
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
{
- struct mlx5_devcom_component *comp;
-
- comp = &devcom->priv->components[id];
- WARN_ON(!rwsem_is_locked(&comp->sem));
+ WARN_ON(!rwsem_is_locked(&devcom->comp->sem));
- WRITE_ONCE(comp->ready, ready);
+ WRITE_ONCE(devcom->comp->ready, ready);
}
-bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id)
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
if (IS_ERR_OR_NULL(devcom))
return false;
- return READ_ONCE(devcom->priv->components[id].ready);
+ return READ_ONCE(devcom->comp->ready);
}
-bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id)
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
- struct mlx5_devcom_component *comp;
+ struct mlx5_devcom_comp *comp;
if (IS_ERR_OR_NULL(devcom))
return false;
- comp = &devcom->priv->components[id];
+ comp = devcom->comp;
down_read(&comp->sem);
if (!READ_ONCE(comp->ready)) {
up_read(&comp->sem);
@@ -271,74 +326,60 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
return true;
}
-void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id)
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom)
{
- struct mlx5_devcom_component *comp = &devcom->priv->components[id];
-
- up_read(&comp->sem);
+ up_read(&devcom->comp->sem);
}
-void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- int *i)
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
+ struct mlx5_devcom_comp_dev **pos)
{
- struct mlx5_devcom_component *comp;
- void *ret;
- int idx;
+ struct mlx5_devcom_comp *comp = devcom->comp;
+ struct mlx5_devcom_comp_dev *tmp;
+ void *data;
- comp = &devcom->priv->components[id];
+ tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);
- if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
- return NULL;
- for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
- if (idx != devcom->idx) {
- ret = rcu_dereference_protected(comp->device[idx].data,
- lockdep_is_held(&comp->sem));
- if (ret)
+ list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
+ if (tmp != devcom) {
+ data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem));
+ if (data)
break;
}
}
- if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
- *i = idx;
+ if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
return NULL;
- }
- *i = idx + 1;
- return ret;
+ *pos = tmp;
+ return data;
}
-void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- int *i)
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
+ struct mlx5_devcom_comp_dev **pos)
{
- struct mlx5_devcom_component *comp;
- void *ret;
- int idx;
+ struct mlx5_devcom_comp *comp = devcom->comp;
+ struct mlx5_devcom_comp_dev *tmp;
+ void *data;
- comp = &devcom->priv->components[id];
+ tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);
- if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
- return NULL;
- for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
- if (idx != devcom->idx) {
+ list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
+ if (tmp != devcom) {
/* This can change concurrently, however 'data' pointer will remain
* valid for the duration of RCU read section.
*/
if (!READ_ONCE(comp->ready))
return NULL;
- ret = rcu_dereference(comp->device[idx].data);
- if (ret)
+ data = rcu_dereference(tmp->data);
+ if (data)
break;
}
}
- if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
- *i = idx;
+ if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
return NULL;
- }
- *i = idx + 1;
- return ret;
+ *pos = tmp;
+ return data;
}
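
The rewrite replaces the fixed per-port array with a global list of components keyed by (id, key); registering a device against a component either takes a reference on an existing entry with the same handler or allocates a new one. A simplified, userspace-style sketch of that lookup-or-create pattern (plain refcount integer, no locking, purely illustrative):

#include <stdlib.h>

/* Minimal model of the devcom component registry: entries are identified by
 * (id, key); registration bumps the refcount of a match or appends a new
 * entry. The kref, rwsem and per-device lists of the real code are omitted.
 */
struct comp {
	int id;
	unsigned long long key;
	int refcnt;
	struct comp *next;
};

static struct comp *comp_get_or_create(struct comp **head, int id,
				       unsigned long long key)
{
	struct comp *c;

	for (c = *head; c; c = c->next) {
		if (c->id == id && c->key == key) {
			c->refcnt++;		/* share the existing component */
			return c;
		}
	}
	c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;
	c->id = id;
	c->key = key;
	c->refcnt = 1;
	c->next = *head;
	*head = c;
	return c;
}
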
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index d953a01b8eaa..8389ac0af708 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,11 +6,8 @@
#include <linux/mlx5/driver.h>
-#define MLX5_DEVCOM_PORTS_SUPPORTED 4
-
-enum mlx5_devcom_components {
+enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
-
MLX5_DEVCOM_NUM_COMPONENTS,
};
@@ -18,45 +15,40 @@ typedef int (*mlx5_devcom_event_handler_t)(int event,
void *my_data,
void *event_data);
-struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
-void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom);
+struct mlx5_devcom_dev *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
+void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
-void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- mlx5_devcom_event_handler_t handler,
- void *data);
-void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id);
+struct mlx5_devcom_comp_dev *
+mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
+ enum mlx5_devcom_component id,
+ u64 key,
+ mlx5_devcom_event_handler_t handler,
+ void *data);
+void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
-int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
+int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data);
-void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- bool ready);
-bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id);
-
-bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id);
-void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id);
-void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id, int *i);
-
-#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i) \
- for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \
- data; \
- data = mlx5_devcom_get_next_peer_data(devcom, id, &i))
-
-void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id, int *i);
-
-#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i) \
- for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \
- data; \
- data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
-
-#endif
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
+
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom);
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom);
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
+ struct mlx5_devcom_comp_dev **pos);
+
+#define mlx5_devcom_for_each_peer_entry(devcom, data, pos) \
+ for (pos = NULL, data = mlx5_devcom_get_next_peer_data(devcom, &pos); \
+ data; \
+ data = mlx5_devcom_get_next_peer_data(devcom, &pos))
+
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
+ struct mlx5_devcom_comp_dev **pos);
+
+#define mlx5_devcom_for_each_peer_entry_rcu(devcom, data, pos) \
+ for (pos = NULL, data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos); \
+ data; \
+ data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
+
+#endif /* __LIB_MLX5_DEVCOM_H__ */
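
With the iteration cursor now being a struct mlx5_devcom_comp_dev * rather than an integer index, a peer walk under the component lock looks roughly like this hedged sketch (the per-peer callback is hypothetical):

/* Hypothetical usage of the new iteration API: ..._begin() takes the
 * component's read lock and checks readiness, the macro walks peers through
 * the pos cursor, ..._end() drops the lock.
 */
extern void my_handle_peer(void *peer_data);	/* made-up callback */

static void example_walk_peers(struct mlx5_devcom_comp_dev *devcom)
{
	struct mlx5_devcom_comp_dev *pos;
	void *peer_data;

	if (!mlx5_devcom_for_each_peer_begin(devcom))
		return;		/* NULL/error handle or component not ready */

	mlx5_devcom_for_each_peer_entry(devcom, peer_data, pos)
		my_handle_peer(peer_data);

	mlx5_devcom_for_each_peer_end(devcom);
}
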
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index d3d628b862f3..69a75459775d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -104,6 +104,6 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif
-int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
+int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
index 4047629a876b..30564d9b00e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
@@ -40,7 +40,7 @@ struct mlx5_hv_vhca_agent {
struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
{
- struct mlx5_hv_vhca *hv_vhca = NULL;
+ struct mlx5_hv_vhca *hv_vhca;
hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
if (!hv_vhca)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
new file mode 100644
index 000000000000..4a078113e292
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -0,0 +1,2411 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/macsec.h>
+#include <linux/mlx5/qp.h>
+#include <linux/if_vlan.h>
+#include <linux/mlx5/fs_helpers.h>
+#include <linux/mlx5/macsec.h>
+#include "fs_core.h"
+#include "lib/macsec_fs.h"
+#include "mlx5_core.h"
+
+/* MACsec TX flow steering */
+#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
+#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
+
+#define TX_CRYPTO_TABLE_LEVEL 0
+#define TX_CRYPTO_TABLE_NUM_GROUPS 3
+#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
+#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
+ CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
+#define TX_CHECK_TABLE_LEVEL 1
+#define TX_CHECK_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_LEVEL 0
+#define RX_CHECK_TABLE_LEVEL 1
+#define RX_ROCE_TABLE_LEVEL 2
+#define RX_CHECK_TABLE_NUM_FTE 3
+#define RX_ROCE_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_NUM_GROUPS 3
+#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
+ ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
+#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
+#define RX_NUM_OF_RULES_PER_SA 2
+
+#define RDMA_RX_ROCE_IP_TABLE_LEVEL 0
+#define RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL 1
+
+#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
+#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
+#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
+
+/* MACsec RX flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
+
+/* MACsec fs_id handling for steering */
+#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
+#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
+
+struct mlx5_sectag_header {
+ __be16 ethertype;
+ u8 tci_an;
+ u8 sl;
+ u32 pn;
+ u8 sci[MACSEC_SCI_LEN]; /* optional */
+} __packed;
+
+struct mlx5_roce_macsec_tx_rule {
+ u32 fs_id;
+ u16 gid_idx;
+ struct list_head entry;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5_macsec_tx_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u32 fs_id;
+};
+
+struct mlx5_macsec_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
+};
+
+struct mlx5_macsec_tables {
+ struct mlx5_macsec_flow_table ft_crypto;
+ struct mlx5_flow_handle *crypto_miss_rule;
+
+ struct mlx5_flow_table *ft_check;
+ struct mlx5_flow_group *ft_check_group;
+ struct mlx5_fc *check_miss_rule_counter;
+ struct mlx5_flow_handle *check_miss_rule;
+ struct mlx5_fc *check_rule_counter;
+
+ u32 refcnt;
+};
+
+struct mlx5_fs_id {
+ u32 id;
+ refcount_t refcnt;
+ sci_t sci;
+ struct rhash_head hash;
+};
+
+struct mlx5_macsec_device {
+ struct list_head macsec_devices_list_entry;
+ void *macdev;
+ struct xarray tx_id_xa;
+ struct xarray rx_id_xa;
+};
+
+struct mlx5_macsec_tx {
+ struct mlx5_flow_handle *crypto_mke_rule;
+ struct mlx5_flow_handle *check_rule;
+
+ struct ida tx_halloc;
+
+ struct mlx5_macsec_tables tables;
+
+ struct mlx5_flow_table *ft_rdma_tx;
+};
+
+struct mlx5_roce_macsec_rx_rule {
+ u32 fs_id;
+ u16 gid_idx;
+ struct mlx5_flow_handle *op;
+ struct mlx5_flow_handle *ip;
+ struct list_head entry;
+};
+
+struct mlx5_macsec_rx_rule {
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5_macsec_miss {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5_macsec_rx_roce {
+ /* Flow table/rules in NIC domain, to check if it's a RoCE packet */
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_modify_hdr *copy_modify_hdr;
+ struct mlx5_macsec_miss nic_miss;
+
+ /* Flow table/rule in RDMA domain, to check dgid */
+ struct mlx5_flow_table *ft_ip_check;
+ struct mlx5_flow_table *ft_macsec_op_check;
+ struct mlx5_macsec_miss miss;
+};
+
+struct mlx5_macsec_rx {
+ struct mlx5_flow_handle *check_rule[2];
+ struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
+
+ struct mlx5_macsec_tables tables;
+ struct mlx5_macsec_rx_roce roce;
+};
+
+union mlx5_macsec_rule {
+ struct mlx5_macsec_tx_rule tx_rule;
+ struct mlx5_macsec_rx_rule rx_rule;
+};
+
+static const struct rhashtable_params rhash_sci = {
+ .key_len = sizeof_field(struct mlx5_fs_id, sci),
+ .key_offset = offsetof(struct mlx5_fs_id, sci),
+ .head_offset = offsetof(struct mlx5_fs_id, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+static const struct rhashtable_params rhash_fs_id = {
+ .key_len = sizeof_field(struct mlx5_fs_id, id),
+ .key_offset = offsetof(struct mlx5_fs_id, id),
+ .head_offset = offsetof(struct mlx5_fs_id, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+struct mlx5_macsec_fs {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_macsec_tx *tx_fs;
+ struct mlx5_macsec_rx *rx_fs;
+
+ /* Stats manage */
+ struct mlx5_macsec_stats stats;
+
+ /* Tx sci -> fs id mapping handling */
+ struct rhashtable sci_hash; /* sci -> mlx5_fs_id */
+
+ /* RX fs_id -> mlx5_fs_id mapping handling */
+ struct rhashtable fs_id_hash; /* fs_id -> mlx5_fs_id */
+
+ /* TX & RX fs_id lists per macsec device */
+ struct list_head macsec_devices_list;
+};
+
+static void macsec_fs_destroy_groups(struct mlx5_macsec_flow_table *ft)
+{
+ int i;
+
+ for (i = ft->num_groups - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(ft->g[i]))
+ mlx5_destroy_flow_group(ft->g[i]);
+ ft->g[i] = NULL;
+ }
+ ft->num_groups = 0;
+}
+
+static void macsec_fs_destroy_flow_table(struct mlx5_macsec_flow_table *ft)
+{
+ macsec_fs_destroy_groups(ft);
+ kfree(ft->g);
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+}
+
+static void macsec_fs_tx_destroy(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_macsec_tables *tx_tables;
+
+ if (mlx5_is_macsec_roce_supported(macsec_fs->mdev))
+ mlx5_destroy_flow_table(tx_fs->ft_rdma_tx);
+
+ tx_tables = &tx_fs->tables;
+
+ /* Tx check table */
+ if (tx_fs->check_rule) {
+ mlx5_del_flow_rules(tx_fs->check_rule);
+ tx_fs->check_rule = NULL;
+ }
+
+ if (tx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->check_miss_rule);
+ tx_tables->check_miss_rule = NULL;
+ }
+
+ if (tx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(tx_tables->ft_check_group);
+ tx_tables->ft_check_group = NULL;
+ }
+
+ if (tx_tables->ft_check) {
+ mlx5_destroy_flow_table(tx_tables->ft_check);
+ tx_tables->ft_check = NULL;
+ }
+
+ /* Tx crypto table */
+ if (tx_fs->crypto_mke_rule) {
+ mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
+ tx_fs->crypto_mke_rule = NULL;
+ }
+
+ if (tx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
+ tx_tables->crypto_miss_rule = NULL;
+ }
+
+ macsec_fs_destroy_flow_table(&tx_tables->ft_crypto);
+}
+
+static int macsec_fs_tx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = kvzalloc(inlen, GFP_KERNEL);
+
+ if (!in) {
+ kfree(ft->g);
+ ft->g = NULL;
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow Group for MKE match */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for SA rules */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static struct mlx5_flow_table
+ *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
+ int level, int max_fte)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb = NULL;
+
+ /* reserve entry for the match all miss group and rule */
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = 0;
+ ft_attr.flags = flags;
+ ft_attr.level = level;
+ ft_attr.max_fte = max_fte;
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+
+ return fdb;
+}
+
+enum {
+ RDMA_TX_MACSEC_LEVEL = 0,
+};
+
+static int macsec_fs_tx_roce_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (!mlx5_is_macsec_roce_supported(mdev)) {
+ mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
+ return 0;
+ }
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ /* Tx RoCE crypto table */
+ ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_TX_MACSEC_LEVEL, CRYPTO_NUM_MAXSEC_FTE);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create MACsec RoCE Tx crypto table err(%d)\n", err);
+ return err;
+ }
+ tx_fs->ft_rdma_tx = ft;
+
+ return 0;
+}
+
+static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *tx_tables;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_macsec_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_spec;
+ }
+
+ tx_tables = &tx_fs->tables;
+ ft_crypto = &tx_tables->ft_crypto;
+
+ /* Tx crypto table */
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Tx crypto table groups */
+ err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->crypto_mke_rule = rule;
+
+ /* Tx crypto table Default miss rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
+ goto err;
+ }
+ tx_tables->crypto_miss_rule = rule;
+
+ /* Tx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
+ TX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Fail to create MACsec TX check table, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->ft_check = flow_table;
+
+ /* Tx check table Default miss group/rule */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+ tx_tables->ft_check_group = flow_group;
+
+ /* Tx check table default drop rule */
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->check_miss_rule = rule;
+
+ /* Tx check table rule */
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec check rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->check_rule = rule;
+
+ err = macsec_fs_tx_roce_create(macsec_fs);
+ if (err)
+ goto err;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err:
+ macsec_fs_tx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+out_spec:
+ kvfree(spec);
+ return err;
+}
+
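+/* The Tx flow tables are reference counted: they are created when the first
+ * Tx SA rule is installed and destroyed again when the last one is removed.
+ */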
+static int macsec_fs_tx_ft_get(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_macsec_tables *tx_tables;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_tx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ tx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_tx_ft_put(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+
+ if (--tx_tables->refcnt)
+ return;
+
+ macsec_fs_tx_destroy(macsec_fs);
+}
+
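+/* Allocate a per-SA fs_id, encode it in the metadata_reg_a match of the Tx
+ * crypto rule and attach the MACsec crypto object to the flow action.
+ */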
+static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ u32 macsec_obj_id,
+ u32 *fs_id)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ int err = 0;
+ u32 id;
+
+ err = ida_alloc_range(&tx_fs->tx_halloc, 1,
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ id = err;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Metadata match */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ macsec_fs_set_tx_fs_id(id));
+
+ *fs_id = id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ flow_act->crypto.obj_id = macsec_obj_id;
+
+ mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
+ return 0;
+}
+
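+/* Build the SecTAG used as packet reformat data: ethertype, TCI/AN bits
+ * derived from the Tx SC, and the 8-byte SCI when it is transmitted.
+ */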
+static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
+ char *reformatbf,
+ size_t *reformat_size)
+{
+ const struct macsec_secy *secy = ctx->secy;
+ bool sci_present = macsec_send_sci(secy);
+ struct mlx5_sectag_header sectag = {};
+ const struct macsec_tx_sc *tx_sc;
+
+ tx_sc = &secy->tx_sc;
+ sectag.ethertype = htons(ETH_P_MACSEC);
+
+ if (sci_present) {
+ sectag.tci_an |= MACSEC_TCI_SC;
+ memcpy(&sectag.sci, &secy->sci,
+ sizeof(sectag.sci));
+ } else {
+ if (tx_sc->end_station)
+ sectag.tci_an |= MACSEC_TCI_ES;
+ if (tx_sc->scb)
+ sectag.tci_an |= MACSEC_TCI_SCB;
+ }
+
+ /* With GCM, C/E clear for !encrypt, both set for encrypt */
+ if (tx_sc->encrypt)
+ sectag.tci_an |= MACSEC_TCI_CONFID;
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ sectag.tci_an |= MACSEC_TCI_C;
+
+ sectag.tci_an |= tx_sc->encoding_sa;
+
+ *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
+
+ memcpy(reformatbf, &sectag, *reformat_size);
+}
+
+static bool macsec_fs_is_macsec_device_empty(struct mlx5_macsec_device *macsec_device)
+{
+ if (xa_empty(&macsec_device->tx_id_xa) &&
+ xa_empty(&macsec_device->rx_id_xa))
+ return true;
+
+ return false;
+}
+
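+/* fs_id bookkeeping: each MACsec netdev gets an mlx5_macsec_device entry with
+ * per-direction xarrays of refcounted fs_ids; the fs_ids are also kept in an
+ * rhashtable (keyed by SCI on Tx, by fs_id on Rx) for fast lookup.
+ */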
+static void macsec_fs_id_del(struct list_head *macsec_devices_list, u32 fs_id,
+ void *macdev, struct rhashtable *hash_table, bool is_tx)
+{
+ const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+ struct mlx5_fs_id *fs_id_found;
+ struct xarray *fs_id_xa;
+
+ list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+ WARN_ON(!macsec_device);
+
+ fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
+ &macsec_device->rx_id_xa;
+ xa_lock(fs_id_xa);
+ fs_id_found = xa_load(fs_id_xa, fs_id);
+ WARN_ON(!fs_id_found);
+
+ if (!refcount_dec_and_test(&fs_id_found->refcnt)) {
+ xa_unlock(fs_id_xa);
+ return;
+ }
+
+ if (fs_id_found->id) {
+ /* Make sure ongoing datapath readers see a valid SA */
+ rhashtable_remove_fast(hash_table, &fs_id_found->hash, *rhash);
+ fs_id_found->id = 0;
+ }
+ xa_unlock(fs_id_xa);
+
+ xa_erase(fs_id_xa, fs_id);
+
+ kfree(fs_id_found);
+
+ if (macsec_fs_is_macsec_device_empty(macsec_device)) {
+ list_del(&macsec_device->macsec_devices_list_entry);
+ kfree(macsec_device);
+ }
+}
+
+static int macsec_fs_id_add(struct list_head *macsec_devices_list, u32 fs_id,
+ void *macdev, struct rhashtable *hash_table, sci_t sci,
+ bool is_tx)
+{
+ const struct rhashtable_params *rhash = (is_tx) ? &rhash_sci : &rhash_fs_id;
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+ struct mlx5_fs_id *fs_id_iter;
+ struct xarray *fs_id_xa;
+ int err;
+
+ if (!is_tx) {
+ rcu_read_lock();
+ fs_id_iter = rhashtable_lookup(hash_table, &fs_id, rhash_fs_id);
+ if (fs_id_iter) {
+ refcount_inc(&fs_id_iter->refcnt);
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+ }
+
+ fs_id_iter = kzalloc(sizeof(*fs_id_iter), GFP_KERNEL);
+ if (!fs_id_iter)
+ return -ENOMEM;
+
+ list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+
+ if (!macsec_device) { /* first time adding a SA to that device */
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device) {
+ err = -ENOMEM;
+ goto err_alloc_dev;
+ }
+ macsec_device->macdev = macdev;
+ xa_init(&macsec_device->tx_id_xa);
+ xa_init(&macsec_device->rx_id_xa);
+ list_add(&macsec_device->macsec_devices_list_entry, macsec_devices_list);
+ }
+
+ fs_id_xa = (is_tx) ? &macsec_device->tx_id_xa :
+ &macsec_device->rx_id_xa;
+ fs_id_iter->id = fs_id;
+ refcount_set(&fs_id_iter->refcnt, 1);
+ fs_id_iter->sci = sci;
+ err = xa_err(xa_store(fs_id_xa, fs_id, fs_id_iter, GFP_KERNEL));
+ if (err)
+ goto err_store_id;
+
+ err = rhashtable_insert_fast(hash_table, &fs_id_iter->hash, *rhash);
+ if (err)
+ goto err_hash_insert;
+
+ return 0;
+
+err_hash_insert:
+ xa_erase(fs_id_xa, fs_id);
+err_store_id:
+ if (macsec_fs_is_macsec_device_empty(macsec_device)) {
+ list_del(&macsec_device->macsec_devices_list_entry);
+ kfree(macsec_device);
+ }
+err_alloc_dev:
+ kfree(fs_id_iter);
+ return err;
+}
+
+static void macsec_fs_tx_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_tx_rule *tx_rule,
+ void *macdev)
+{
+ macsec_fs_id_del(&macsec_fs->macsec_devices_list, tx_rule->fs_id, macdev,
+ &macsec_fs->sci_hash, true);
+
+ if (tx_rule->rule) {
+ mlx5_del_flow_rules(tx_rule->rule);
+ tx_rule->rule = NULL;
+ }
+
+ if (tx_rule->pkt_reformat) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
+ tx_rule->pkt_reformat = NULL;
+ }
+
+ if (tx_rule->fs_id) {
+ ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
+ tx_rule->fs_id = 0;
+ }
+
+ kfree(tx_rule);
+
+ macsec_fs_tx_ft_put(macsec_fs);
+}
+
+#define MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES 1
+
+static union mlx5_macsec_rule *
+macsec_fs_tx_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs, u32 *fs_id)
+{
+ char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ union mlx5_macsec_rule *macsec_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *tx_tables;
+ struct mlx5_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ size_t reformat_size;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_tx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_tx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ tx_rule = &macsec_rule->tx_rule;
+
+ /* Tx crypto table crypto rule */
+ macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
+ reformat_params.size = reformat_size;
+ reformat_params.data = reformatbf;
+
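+ /* On a VLAN upper device, offset the SecTAG insertion past the 4-byte VLAN tag */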
+ if (is_vlan_dev(macsec_ctx->netdev))
+ reformat_params.param_0 = MLX5_REFORMAT_PARAM_ADD_MACSEC_OFFSET_4_BYTES;
+
+ flow_act.pkt_reformat = mlx5_packet_reformat_alloc(mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (IS_ERR(flow_act.pkt_reformat)) {
+ err = PTR_ERR(flow_act.pkt_reformat);
+ mlx5_core_err(mdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
+ goto err;
+ }
+ tx_rule->pkt_reformat = flow_act.pkt_reformat;
+
+ err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, fs_id);
+ if (err) {
+ mlx5_core_err(mdev,
+ "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+
+ tx_rule->fs_id = *fs_id;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx_tables->ft_check;
+ rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
+ goto err;
+ }
+ tx_rule->rule = rule;
+
+ err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, *fs_id, macsec_ctx->secy->netdev,
+ &macsec_fs->sci_hash, attrs->sci, true);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
+ goto err;
+ }
+
+ goto out_spec;
+
+err:
+ macsec_fs_tx_del_rule(macsec_fs, tx_rule, macsec_ctx->secy->netdev);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+
+ return macsec_rule;
+}
+
+static void macsec_fs_tx_cleanup(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *tx_tables;
+
+ if (!tx_fs)
+ return;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt) {
+ mlx5_core_err(mdev,
+ "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
+ tx_tables->refcnt);
+ return;
+ }
+
+ ida_destroy(&tx_fs->tx_halloc);
+
+ if (tx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
+ tx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (tx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+}
+
+static int macsec_fs_tx_init(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *tx_tables;
+ struct mlx5_macsec_tx *tx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ tx_tables = &tx_fs->tables;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+ tx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Tx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ tx_tables->check_miss_rule_counter = flow_counter;
+
+ ida_init(&tx_fs->tx_halloc);
+ INIT_LIST_HEAD(&macsec_fs->macsec_devices_list);
+
+ macsec_fs->tx_fs = tx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_roce_miss_destroy(struct mlx5_macsec_miss *miss)
+{
+ mlx5_del_flow_rules(miss->rule);
+ mlx5_destroy_flow_group(miss->g);
+}
+
+static void macsec_fs_rdma_rx_destroy(struct mlx5_macsec_rx_roce *roce, struct mlx5_core_dev *mdev)
+{
+ if (!mlx5_is_macsec_roce_supported(mdev))
+ return;
+
+ mlx5_del_flow_rules(roce->nic_miss.rule);
+ mlx5_del_flow_rules(roce->rule);
+ mlx5_modify_header_dealloc(mdev, roce->copy_modify_hdr);
+ mlx5_destroy_flow_group(roce->nic_miss.g);
+ mlx5_destroy_flow_group(roce->g);
+ mlx5_destroy_flow_table(roce->ft);
+
+ macsec_fs_rx_roce_miss_destroy(&roce->miss);
+ mlx5_destroy_flow_table(roce->ft_macsec_op_check);
+ mlx5_destroy_flow_table(roce->ft_ip_check);
+}
+
+static void macsec_fs_rx_destroy(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_macsec_tables *rx_tables;
+ int i;
+
+ /* Rx check table */
+ for (i = 1; i >= 0; --i) {
+ if (rx_fs->check_rule[i]) {
+ mlx5_del_flow_rules(rx_fs->check_rule[i]);
+ rx_fs->check_rule[i] = NULL;
+ }
+
+ if (rx_fs->check_rule_pkt_reformat[i]) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev,
+ rx_fs->check_rule_pkt_reformat[i]);
+ rx_fs->check_rule_pkt_reformat[i] = NULL;
+ }
+ }
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->check_miss_rule);
+ rx_tables->check_miss_rule = NULL;
+ }
+
+ if (rx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(rx_tables->ft_check_group);
+ rx_tables->ft_check_group = NULL;
+ }
+
+ if (rx_tables->ft_check) {
+ mlx5_destroy_flow_table(rx_tables->ft_check);
+ rx_tables->ft_check = NULL;
+ }
+
+ /* Rx crypto table */
+ if (rx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
+ rx_tables->crypto_miss_rule = NULL;
+ }
+
+ macsec_fs_destroy_flow_table(&rx_tables->ft_crypto);
+
+ macsec_fs_rdma_rx_destroy(&macsec_fs->rx_fs->roce, macsec_fs->mdev);
+}
+
+static int macsec_fs_rx_create_crypto_table_groups(struct mlx5_macsec_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow group for SA rule with SCI */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow group for SA rule without SCI */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
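+/* Add one Rx check table rule per SecTAG layout (with or without SCI): on a
+ * clean MACsec/ASO syndrome the SecTAG is stripped via packet reformat and
+ * the packet is counted and forwarded (to the RoCE tables when they exist).
+ */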
+static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec,
+ int reformat_param_size)
+{
+ int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
+ u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_destination roce_dest[2];
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_flow_handle *rule;
+ int err = 0, dstn = 0;
+
+ rx_tables = &rx_fs->tables;
+
+ /* Rx check table decap 16B rule */
+ memset(dest, 0, sizeof(*dest));
+ memset(flow_act, 0, sizeof(*flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
+ reformat_params.size = reformat_param_size;
+ reformat_params.data = mlx5_reformat_buf;
+ flow_act->pkt_reformat = mlx5_packet_reformat_alloc(mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (IS_ERR(flow_act->pkt_reformat)) {
+ err = PTR_ERR(flow_act->pkt_reformat);
+ mlx5_core_err(mdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
+ return err;
+ }
+ rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ /* MACsec syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
+ /* ASO return reg syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+ /* Sectag TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ flow_act->flags = FLOW_ACT_NO_APPEND;
+
+ if (rx_fs->roce.ft) {
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ roce_dest[dstn].ft = rx_fs->roce.ft;
+ dstn++;
+ } else {
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ }
+
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
+ return err;
+ }
+
+ rx_fs->check_rule[rule_index] = rule;
+
+ return 0;
+}
+
+static int macsec_fs_rx_roce_miss_create(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_handle *rule;
+ u32 *flow_group_in;
+ int err;
+
+ flow_group_in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* The IP check ft has no miss rule since the default miss action is to go to the next prio */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ roce->ft_macsec_op_check->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ roce->ft_macsec_op_check->max_fte - 1);
+ flow_group = mlx5_create_flow_group(roce->ft_macsec_op_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ mlx5_core_err(mdev,
+ "Failed to create miss flow group for MACsec RoCE operation check table err(%d)\n",
+ err);
+ goto err_macsec_op_miss_group;
+ }
+ roce->miss.g = flow_group;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ rule = mlx5_add_flow_rules(roce->ft_macsec_op_check, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE operation check table err(%d)\n",
+ err);
+ goto err_macsec_op_rule;
+ }
+ roce->miss.rule = rule;
+
+ kvfree(flow_group_in);
+ return 0;
+
+err_macsec_op_rule:
+ mlx5_destroy_flow_group(roce->miss.g);
+err_macsec_op_miss_group:
+ kvfree(flow_group_in);
+ return err;
+}
+
+#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
+
+static int macsec_fs_rx_roce_jump_to_rdma_groups_create(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ struct mlx5_flow_group *g;
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Failed to create main flow group for MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_udp_group;
+ }
+ roce->g = g;
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Failed to create miss flow group for MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_udp_miss_group;
+ }
+ roce->nic_miss.g = g;
+
+ kvfree(in);
+ return 0;
+
+err_udp_miss_group:
+ mlx5_destroy_flow_group(roce->g);
+err_udp_group:
+ kvfree(in);
+ return err;
+}
+
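+/* In the NIC Rx UDP table, RoCEv2 packets (UDP dport 4791) get metadata_reg_b
+ * copied into metadata_reg_c_5 and are forwarded to the RDMA Rx IP check
+ * table; all other traffic falls through to the next prio.
+ */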
+static int macsec_fs_rx_roce_jump_to_rdma_rules_create(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_destination dst = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ROCE_V2_UDP_DPORT);
+
+ MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(copy_action_in, action, src_offset, 0);
+ MLX5_SET(copy_action_in, action, length, 32);
+ MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_5);
+ MLX5_SET(copy_action_in, action, dst_offset, 0);
+
+ modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev,
+ "Failed to alloc macsec copy modify_header_id err(%d)\n", err);
+ goto err_alloc_hdr;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.modify_hdr = modify_hdr;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_ip_check;
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add rule to MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_add_rule;
+ }
+ roce->rule = rule;
+ roce->copy_modify_hdr = modify_hdr;
+
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add miss rule to MACsec RoCE NIC UDP table err(%d)\n",
+ err);
+ goto err_add_rule2;
+ }
+ roce->nic_miss.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_rule2:
+ mlx5_del_flow_rules(roce->rule);
+err_add_rule:
+ mlx5_modify_header_dealloc(macsec_fs->mdev, modify_hdr);
+err_alloc_hdr:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_rx_roce_jump_to_rdma_create(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_rx_roce *roce)
+{
+ int err;
+
+ err = macsec_fs_rx_roce_jump_to_rdma_groups_create(macsec_fs->mdev, roce);
+ if (err)
+ return err;
+
+ err = macsec_fs_rx_roce_jump_to_rdma_rules_create(macsec_fs, roce);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5_destroy_flow_group(roce->nic_miss.g);
+ mlx5_destroy_flow_group(roce->g);
+ return err;
+}
+
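+/* RoCE Rx chain: an IP check table and a MACsec operation check table are
+ * created in the RDMA Rx namespace, and a NIC-domain UDP table steers RoCEv2
+ * traffic into them.
+ */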
+static int macsec_fs_rx_roce_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
+ if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev)) {
+ mlx5_core_dbg(mdev, "Failed to init RoCE MACsec, capabilities not supported\n");
+ return 0;
+ }
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_IP_TABLE_LEVEL,
+ CRYPTO_NUM_MAXSEC_FTE);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec IP check RoCE table err(%d)\n", err);
+ return err;
+ }
+ rx_fs->roce.ft_ip_check = ft;
+
+ ft = macsec_fs_auto_group_table_create(ns, 0, RDMA_RX_ROCE_MACSEC_OP_TABLE_LEVEL,
+ CRYPTO_NUM_MAXSEC_FTE);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec operation check RoCE table err(%d)\n",
+ err);
+ goto err_macsec_op;
+ }
+ rx_fs->roce.ft_macsec_op_check = ft;
+
+ err = macsec_fs_rx_roce_miss_create(mdev, &rx_fs->roce);
+ if (err)
+ goto err_miss_create;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ goto err_ns;
+ }
+
+ ft_attr.level = RX_ROCE_TABLE_LEVEL;
+ ft_attr.max_fte = RX_ROCE_TABLE_NUM_FTE;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec jump to RX RoCE, NIC table err(%d)\n", err);
+ goto err_ns;
+ }
+ rx_fs->roce.ft = ft;
+
+ err = macsec_fs_rx_roce_jump_to_rdma_create(macsec_fs, &rx_fs->roce);
+ if (err)
+ goto err_udp_ft;
+
+ return 0;
+
+err_udp_ft:
+ mlx5_destroy_flow_table(rx_fs->roce.ft);
+err_ns:
+ macsec_fs_rx_roce_miss_destroy(&rx_fs->roce.miss);
+err_miss_create:
+ mlx5_destroy_flow_table(rx_fs->roce.ft_macsec_op_check);
+err_macsec_op:
+ mlx5_destroy_flow_table(rx_fs->roce.ft_ip_check);
+ return err;
+}
+
+static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_flow_table *ft_crypto;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto free_spec;
+ }
+
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ err = macsec_fs_rx_roce_create(macsec_fs);
+ if (err)
+ goto out_flow_group;
+
+ /* Rx crypto table */
+ ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
+ goto err;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Rx crypto table groups */
+ err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add MACsec Rx crypto table default miss rule %d\n",
+ err);
+ goto err;
+ }
+ rx_tables->crypto_miss_rule = rule;
+
+ /* Rx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns,
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
+ RX_CHECK_TABLE_LEVEL,
+ RX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ mlx5_core_err(mdev, "Fail to create MACsec RX check table, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->ft_check = flow_table;
+
+ /* Rx check table Default miss group/rule */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ mlx5_core_err(mdev,
+ "Failed to create default flow group for MACsec Rx check table err(%d)\n",
+ err);
+ goto err;
+ }
+ rx_tables->ft_check_group = flow_group;
+
+ /* Rx check table default drop rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->check_miss_rule = rule;
+
+ /* Rx check table decap rules */
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
+ if (err)
+ goto err;
+
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
+ if (err)
+ goto err;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_rx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+free_spec:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_rx_ft_get(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ int err = 0;
+
+ if (rx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_rx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ rx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_rx_ft_put(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+
+ if (--rx_tables->refcnt)
+ return;
+
+ macsec_fs_rx_destroy(macsec_fs);
+}
+
+static void macsec_fs_rx_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ struct mlx5_macsec_rx_rule *rx_rule,
+ void *macdev, u32 fs_id)
+{
+ int i;
+
+ macsec_fs_id_del(&macsec_fs->macsec_devices_list, fs_id, macdev,
+ &macsec_fs->fs_id_hash, false);
+
+ for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
+ if (rx_rule->rule[i]) {
+ mlx5_del_flow_rules(rx_rule->rule[i]);
+ rx_rule->rule[i] = NULL;
+ }
+ }
+
+ if (rx_rule->meta_modhdr) {
+ mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
+ rx_rule->meta_modhdr = NULL;
+ }
+
+ kfree(rx_rule);
+
+ macsec_fs_rx_ft_put(macsec_fs);
+}
+
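+/* Rx SA match: MACsec ethertype plus the SecTAG TCI/AN bits, and either the
+ * explicit SCI (macsec_tag_2/3) or the source MAC derived from the SCI when
+ * the SecTAG carries no SCI.
+ */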
+static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_macsec_rule_attrs *attrs,
+ bool sci_present)
+{
+ u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
+ struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
+ __be32 *sci_p = (__be32 *)(&attrs->sci);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ /* MACsec ethertype */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ /* Sectag AN + TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (sci_present) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
+ be32_to_cpu(sci_p[0]));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_3);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
+ be32_to_cpu(sci_p[1]));
+ } else {
+ /* When the SCI isn't present in the SecTAG, match on the source MAC
+ * address instead; this rule is only added when the SCI carries the
+ * default MACsec port.
+ */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
+ sci_p, ETH_ALEN);
+ }
+
+ crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ crypto_params->obj_id = attrs->macsec_obj_id;
+}
+
+static union mlx5_macsec_rule *
+macsec_fs_rx_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 fs_id)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ union mlx5_macsec_rule *macsec_rule = NULL;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_macsec_flow_table *ft_crypto;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_rx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_rx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ rx_rule = &macsec_rule->rx_rule;
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Set bits [31:30] to the MACsec marker (0x1) and bits [15:0] to the fs_id */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, macsec_fs_set_rx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "Fail to alloc MACsec set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto err;
+ }
+ rx_rule->meta_modhdr = modify_hdr;
+
+ /* Rx crypto table with SCI rule */
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[0] = rule;
+
+ /* Rx crypto table without SCI rule */
+ if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) {
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[1] = rule;
+ }
+
+ err = macsec_fs_id_add(&macsec_fs->macsec_devices_list, fs_id, macsec_ctx->secy->netdev,
+ &macsec_fs->fs_id_hash, attrs->sci, false);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to save fs_id, err=%d\n", err);
+ goto err;
+ }
+
+ kvfree(spec);
+ return macsec_rule;
+
+err:
+ macsec_fs_rx_del_rule(macsec_fs, rx_rule, macsec_ctx->secy->netdev, fs_id);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+ return macsec_rule;
+}
+
+static int macsec_fs_rx_init(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *rx_tables;
+ struct mlx5_macsec_rx *rx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
+ if (!rx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+
+ rx_tables = &rx_fs->tables;
+ rx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to create MACsec Rx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ rx_tables->check_miss_rule_counter = flow_counter;
+
+ macsec_fs->rx_fs = rx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_cleanup(struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_macsec_tables *rx_tables;
+
+ if (!rx_fs)
+ return;
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->refcnt) {
+ mlx5_core_err(mdev,
+ "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
+ rx_tables->refcnt);
+ return;
+ }
+
+ if (rx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
+ rx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (rx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+}
+
+static void set_ipaddr_spec_v4(struct sockaddr_in *in, struct mlx5_flow_spec *spec, bool is_dst_ip)
+{
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, MLX5_FS_IPV4_VERSION);
+
+ if (is_dst_ip) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &in->sin_addr.s_addr, 4);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &in->sin_addr.s_addr, 4);
+ }
+}
+
+static void set_ipaddr_spec_v6(struct sockaddr_in6 *in6, struct mlx5_flow_spec *spec,
+ bool is_dst_ip)
+{
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, MLX5_FS_IPV6_VERSION);
+
+ if (is_dst_ip) {
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &in6->sin6_addr, 16);
+ } else {
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &in6->sin6_addr, 16);
+ }
+}
+
+static void set_ipaddr_spec(const struct sockaddr *addr,
+ struct mlx5_flow_spec *spec, bool is_dst_ip)
+{
+ struct sockaddr_in6 *in6;
+
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+
+ if (addr->sa_family == AF_INET) {
+ struct sockaddr_in *in = (struct sockaddr_in *)addr;
+
+ set_ipaddr_spec_v4(in, spec, is_dst_ip);
+ return;
+ }
+
+ in6 = (struct sockaddr_in6 *)addr;
+ set_ipaddr_spec_v6(in6, spec, is_dst_ip);
+}
+
+static void macsec_fs_del_roce_rule_rx(struct mlx5_roce_macsec_rx_rule *rx_rule)
+{
+ mlx5_del_flow_rules(rx_rule->op);
+ mlx5_del_flow_rules(rx_rule->ip);
+ list_del(&rx_rule->entry);
+ kfree(rx_rule);
+}
+
+static void macsec_fs_del_roce_rules_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
+ struct list_head *rx_rules_list)
+{
+ struct mlx5_roce_macsec_rx_rule *rx_rule, *next;
+
+ if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
+ return;
+
+ list_for_each_entry_safe(rx_rule, next, rx_rules_list, entry) {
+ if (rx_rule->fs_id == fs_id)
+ macsec_fs_del_roce_rule_rx(rx_rule);
+ }
+}
+
+static void macsec_fs_del_roce_rule_tx(struct mlx5_core_dev *mdev,
+ struct mlx5_roce_macsec_tx_rule *tx_rule)
+{
+ mlx5_del_flow_rules(tx_rule->rule);
+ mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
+ list_del(&tx_rule->entry);
+ kfree(tx_rule);
+}
+
+static void macsec_fs_del_roce_rules_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id,
+ struct list_head *tx_rules_list)
+{
+ struct mlx5_roce_macsec_tx_rule *tx_rule, *next;
+
+ if (!mlx5_is_macsec_roce_supported(macsec_fs->mdev))
+ return;
+
+ list_for_each_entry_safe(tx_rule, next, tx_rules_list, entry) {
+ if (tx_rule->fs_id == fs_id)
+ macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);
+ }
+}
+
+void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats)
+{
+ struct mlx5_macsec_stats *stats = (struct mlx5_macsec_stats *)macsec_stats;
+ struct mlx5_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+ struct mlx5_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+
+ if (tx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_rule_counter,
+ &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
+
+ if (tx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
+ &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
+
+ if (rx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_rule_counter,
+ &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
+
+ if (rx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
+ &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
+}
+
+struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs)
+{
+ if (!macsec_fs)
+ return NULL;
+
+ return &macsec_fs->stats;
+}
+
+u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci)
+{
+ struct mlx5_fs_id *mlx5_fs_id;
+ u32 fs_id = 0;
+
+ rcu_read_lock();
+ mlx5_fs_id = rhashtable_lookup(&macsec_fs->sci_hash, sci, rhash_sci);
+ if (mlx5_fs_id)
+ fs_id = mlx5_fs_id->id;
+ rcu_read_unlock();
+
+ return fs_id;
+}
+
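+/* Install the offload rule for a Tx or Rx SA and fire the MACsec notifier so
+ * listeners (e.g. the RDMA side) can keep their RoCE MACsec rules in sync.
+ */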
+union mlx5_macsec_rule *
+mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs,
+ .macdev = macsec_ctx->secy->netdev,
+ .is_tx =
+ (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT)
+ };
+ union mlx5_macsec_rule *macsec_rule;
+ u32 tx_new_fs_id;
+
+ macsec_rule = (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, &tx_new_fs_id) :
+ macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
+
+ if (macsec_rule) {
+ /* Only read tx_new_fs_id when the Tx rule was actually created */
+ data.fs_id = (data.is_tx) ? tx_new_fs_id : *sa_fs_id;
+ blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ &data);
+ }
+
+ return macsec_rule;
+}
+
+void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ union mlx5_macsec_rule *macsec_rule,
+ int action, void *macdev, u32 sa_fs_id)
+{
+ struct mlx5_macsec_event_data data = {.macsec_fs = macsec_fs,
+ .macdev = macdev,
+ .is_tx = (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT)
+ };
+
+ data.fs_id = (data.is_tx) ? macsec_rule->tx_rule.fs_id : sa_fs_id;
+ blocking_notifier_call_chain(&macsec_fs->mdev->macsec_nh,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ &data);
+
+ (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule, macdev) :
+ macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule, macdev, sa_fs_id);
+}
+
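+/* Per-GID RoCE Rx rules: match the destination IP in the IP check table and
+ * the SA fs_id (via metadata_reg_c_5) in the operation check table.
+ */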
+static int mlx5_macsec_fs_add_roce_rule_rx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
+ const struct sockaddr *addr,
+ struct list_head *rx_rules_list)
+{
+ struct mlx5_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_roce_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *new_rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ rx_rule = kzalloc(sizeof(*rx_rule), GFP_KERNEL);
+ if (!rx_rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ set_ipaddr_spec(addr, spec, true);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.ft = rx_fs->roce.ft_macsec_op_check;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_ip_check, spec, &flow_act,
+ &dest, 1);
+ if (IS_ERR(new_rule)) {
+ err = PTR_ERR(new_rule);
+ goto ip_rule_err;
+ }
+ rx_rule->ip = new_rule;
+
+ memset(&flow_act, 0, sizeof(flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_5);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_5,
+ macsec_fs_set_rx_fs_id(fs_id));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ new_rule = mlx5_add_flow_rules(rx_fs->roce.ft_macsec_op_check, spec, &flow_act,
+ NULL, 0);
+ if (IS_ERR(new_rule)) {
+ err = PTR_ERR(new_rule);
+ goto op_rule_err;
+ }
+ rx_rule->op = new_rule;
+ rx_rule->gid_idx = gid_idx;
+ rx_rule->fs_id = fs_id;
+ list_add_tail(&rx_rule->entry, rx_rules_list);
+
+ goto out;
+
+op_rule_err:
+ mlx5_del_flow_rules(rx_rule->ip);
+ rx_rule->ip = NULL;
+ip_rule_err:
+ kfree(rx_rule);
+out:
+ kvfree(spec);
+ return err;
+}
+
+static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32 fs_id, u16 gid_idx,
+ const struct sockaddr *addr,
+ struct list_head *tx_rules_list)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_roce_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *new_rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ tx_rule = kzalloc(sizeof(*tx_rule), GFP_KERNEL);
+ if (!tx_rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ set_ipaddr_spec(addr, spec, false);
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
+ MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "Fail to alloc ROCE MACsec set modify_header_id err=%d\n",
+ err);
+ modify_hdr = NULL;
+ goto modify_hdr_err;
+ }
+ tx_rule->meta_modhdr = modify_hdr;
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dest.ft = tx_fs->tables.ft_crypto.t;
+ new_rule = mlx5_add_flow_rules(tx_fs->ft_rdma_tx, spec, &flow_act, &dest, 1);
+ if (IS_ERR(new_rule)) {
+ err = PTR_ERR(new_rule);
+ mlx5_core_err(mdev, "Failed to add ROCE TX rule, err=%d\n", err);
+ goto rule_err;
+ }
+ tx_rule->rule = new_rule;
+ tx_rule->gid_idx = gid_idx;
+ tx_rule->fs_id = fs_id;
+ list_add_tail(&tx_rule->entry, tx_rules_list);
+
+ goto out;
+
+rule_err:
+ mlx5_modify_header_dealloc(mdev, tx_rule->meta_modhdr);
+modify_hdr_err:
+ kfree(tx_rule);
+out:
+ kvfree(spec);
+ return err;
+}
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list)
+{
+ struct mlx5_roce_macsec_rx_rule *rx_rule, *next_rx;
+ struct mlx5_roce_macsec_tx_rule *tx_rule, *next_tx;
+
+ list_for_each_entry_safe(tx_rule, next_tx, tx_rules_list, entry) {
+ if (tx_rule->gid_idx == gid_idx)
+ macsec_fs_del_roce_rule_tx(macsec_fs->mdev, tx_rule);
+ }
+
+ list_for_each_entry_safe(rx_rule, next_rx, rx_rules_list, entry) {
+ if (rx_rule->gid_idx == gid_idx)
+ macsec_fs_del_roce_rule_rx(rx_rule);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_rule);
+
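+/* Replay RoCE Tx and Rx rules for every SA fs_id already installed on
+ * @macdev, e.g. when a new GID is configured on top of it.
+ */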
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs)
+{
+ struct mlx5_macsec_device *iter, *macsec_device = NULL;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5_fs_id *fs_id_iter;
+ unsigned long index = 0;
+ int err;
+
+ list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) {
+ if (iter->macdev == macdev) {
+ macsec_device = iter;
+ break;
+ }
+ }
+
+ if (!macsec_device)
+ return 0;
+
+ xa_for_each(&macsec_device->tx_id_xa, index, fs_id_iter) {
+ err = mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id_iter->id, gid_idx, addr,
+ tx_rules_list);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n");
+ goto out;
+ }
+ }
+
+ index = 0;
+ xa_for_each(&macsec_device->rx_id_xa, index, fs_id_iter) {
+ err = mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id_iter->id, gid_idx, addr,
+ rx_rules_list);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to add roce TX rule\n");
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list, rx_rules_list);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_rule);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx)
+{
+ (is_tx) ?
+ mlx5_macsec_fs_add_roce_rule_tx(macsec_fs, fs_id, gid_idx, addr,
+ tx_rules_list) :
+ mlx5_macsec_fs_add_roce_rule_rx(macsec_fs, fs_id, gid_idx, addr,
+ rx_rules_list);
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_add_roce_sa_rules);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx)
+{
+ (is_tx) ?
+ macsec_fs_del_roce_rules_tx(macsec_fs, fs_id, tx_rules_list) :
+ macsec_fs_del_roce_rules_rx(macsec_fs, fs_id, rx_rules_list);
+}
+EXPORT_SYMBOL_GPL(mlx5_macsec_del_roce_sa_rules);
+
+void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs)
+{
+ macsec_fs_rx_cleanup(macsec_fs);
+ macsec_fs_tx_cleanup(macsec_fs);
+ rhashtable_destroy(&macsec_fs->fs_id_hash);
+ rhashtable_destroy(&macsec_fs->sci_hash);
+ kfree(macsec_fs);
+}
+
+struct mlx5_macsec_fs *
+mlx5_macsec_fs_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_macsec_fs *macsec_fs;
+ int err;
+
+ macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
+ if (!macsec_fs)
+ return NULL;
+
+ macsec_fs->mdev = mdev;
+
+ err = rhashtable_init(&macsec_fs->sci_hash, &rhash_sci);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
+ err);
+ goto err_hash;
+ }
+
+ err = rhashtable_init(&macsec_fs->fs_id_hash, &rhash_fs_id);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init FS_ID hash table, err=%d\n",
+ err);
+ goto sci_hash_cleanup;
+ }
+
+ err = macsec_fs_tx_init(macsec_fs);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto fs_id_hash_cleanup;
+ }
+
+ err = macsec_fs_rx_init(macsec_fs);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto tx_cleanup;
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&mdev->macsec_nh);
+
+ return macsec_fs;
+
+tx_cleanup:
+ macsec_fs_tx_cleanup(macsec_fs);
+fs_id_hash_cleanup:
+ rhashtable_destroy(&macsec_fs->fs_id_hash);
+sci_hash_cleanup:
+ rhashtable_destroy(&macsec_fs->sci_hash);
+err_hash:
+ kfree(macsec_fs);
+ return NULL;
+}
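For illustration, a minimal sketch of how the two RoCE MACsec exports above might be driven from the RDMA side when a GID is added or removed. Only mlx5_macsec_add_roce_rule() and mlx5_macsec_del_roce_rule() come from the code above; the wrapper names and the origin of the rule lists and macsec_fs pointer are assumptions.

static int example_roce_gid_macsec_attach(struct mlx5_macsec_fs *macsec_fs,
					  void *macdev,
					  const struct sockaddr *gid_addr,
					  u16 gid_idx,
					  struct list_head *tx_rules_list,
					  struct list_head *rx_rules_list)
{
	/* Replays TX/RX steering rules for every fs_id already installed on
	 * this MACsec netdev; returns 0 if the netdev has no MACsec offload.
	 */
	return mlx5_macsec_add_roce_rule(macdev, gid_addr, gid_idx,
					 tx_rules_list, rx_rules_list,
					 macsec_fs);
}

static void example_roce_gid_macsec_detach(struct mlx5_macsec_fs *macsec_fs,
					   u16 gid_idx,
					   struct list_head *tx_rules_list,
					   struct list_head *rx_rules_list)
{
	/* Tears down every rule previously added for this GID index. */
	mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, tx_rules_list,
				  rx_rules_list);
}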
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
new file mode 100644
index 000000000000..34b80c3ef6a5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_MACSEC_STEERING_H__
+#define __MLX5_MACSEC_STEERING_H__
+
+#ifdef CONFIG_MLX5_MACSEC
+
+/* Bits 31-30: MACsec marker, Bits 15-0: MACsec id */
+#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
+#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
+#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
+#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
+
+#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
+
+struct mlx5_macsec_fs;
+union mlx5_macsec_rule;
+
+struct mlx5_macsec_rule_attrs {
+ sci_t sci;
+ u32 macsec_obj_id;
+ u8 assoc_num;
+ int action;
+};
+
+struct mlx5_macsec_stats {
+ u64 macsec_rx_pkts;
+ u64 macsec_rx_bytes;
+ u64 macsec_rx_pkts_drop;
+ u64 macsec_rx_bytes_drop;
+ u64 macsec_tx_pkts;
+ u64 macsec_tx_bytes;
+ u64 macsec_tx_pkts_drop;
+ u64 macsec_tx_bytes_drop;
+};
+
+enum mlx5_macsec_action {
+ MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
+};
+
+void mlx5_macsec_fs_cleanup(struct mlx5_macsec_fs *macsec_fs);
+
+struct mlx5_macsec_fs *
+mlx5_macsec_fs_init(struct mlx5_core_dev *mdev);
+
+union mlx5_macsec_rule *
+mlx5_macsec_fs_add_rule(struct mlx5_macsec_fs *macsec_fs,
+ const struct macsec_context *ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id);
+
+void mlx5_macsec_fs_del_rule(struct mlx5_macsec_fs *macsec_fs,
+ union mlx5_macsec_rule *macsec_rule,
+ int action, void *macdev, u32 sa_fs_id);
+
+void mlx5_macsec_fs_get_stats_fill(struct mlx5_macsec_fs *macsec_fs, void *macsec_stats);
+struct mlx5_macsec_stats *mlx5_macsec_fs_get_stats(struct mlx5_macsec_fs *macsec_fs);
+u32 mlx5_macsec_fs_get_fs_id_from_hashtable(struct mlx5_macsec_fs *macsec_fs, sci_t *sci);
+
+#endif
+
+#endif /* __MLX5_MACSEC_STEERING_H__ */
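As a worked example of the metadata layout documented in the header above (a sketch, not part of the patch: the handler name and the pr_debug call are assumptions), a 32-bit RX metadata word of 0x4000002a carries the MACsec marker 0x1 in bits 31:30 and the fs_id 0x2a in bits 15:0:

static void example_handle_rx_metadata(u32 metadata)
{
	if (!MLX5_MACSEC_METADATA_MARKER(metadata))
		return;	/* not a MACsec completion */

	/* 0x4000002a: ((metadata >> 30) & 0x3) == 0x1, fs_id == 0x2a */
	pr_debug("MACsec RX fs_id %u\n",
		 MLX5_MACSEC_RX_METADAT_HANDLE(metadata));
}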
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 72ae560a1c68..15561965d2af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -49,7 +49,6 @@
#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
-#include "thermal.h"
#include "lib/eq.h"
#include "fs_core.h"
#include "lib/mpfs.h"
@@ -73,6 +72,7 @@
#include "sf/dev/dev.h"
#include "sf/sf.h"
#include "mlx5_irq.h"
+#include "hwmon.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -361,9 +361,8 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
-static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
- enum mlx5_cap_type cap_type,
- enum mlx5_cap_mode cap_mode)
+int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -951,10 +950,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
- dev->priv.devcom = mlx5_devcom_register_device(dev);
- if (IS_ERR(dev->priv.devcom))
- mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
- dev->priv.devcom);
+ dev->priv.devc = mlx5_devcom_register_device(dev);
+ if (IS_ERR(dev->priv.devc))
+ mlx5_core_warn(dev, "failed to register devcom device %ld\n",
+ PTR_ERR(dev->priv.devc));
err = mlx5_query_board_id(dev);
if (err) {
@@ -1089,7 +1088,7 @@ err_eq_cleanup:
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
- mlx5_devcom_unregister_device(dev->priv.devcom);
+ mlx5_devcom_unregister_device(dev->priv.devc);
return err;
}
@@ -1118,7 +1117,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
- mlx5_devcom_unregister_device(dev->priv.devcom);
+ mlx5_devcom_unregister_device(dev->priv.devc);
}
static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
@@ -1142,7 +1141,7 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
return err;
}
- err = mlx5_cmd_init(dev);
+ err = mlx5_cmd_enable(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
return err;
@@ -1196,7 +1195,7 @@ stop_health_poll:
mlx5_stop_health_poll(dev, boot);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
- mlx5_cmd_cleanup(dev);
+ mlx5_cmd_disable(dev);
return err;
}
@@ -1207,7 +1206,7 @@ static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
- mlx5_cmd_cleanup(dev);
+ mlx5_cmd_disable(dev);
}
static int mlx5_function_open(struct mlx5_core_dev *dev)
@@ -1620,21 +1619,24 @@ static int mlx5_query_hca_caps_light(struct mlx5_core_dev *dev)
return err;
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, nic_flow_table) ||
MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN_64(dev, general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_VDPA_EMULATION,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
@@ -1714,7 +1716,6 @@ static const int types[] = {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
- MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
MLX5_CAP_DEBUG,
MLX5_CAP_DEV_MEM,
@@ -1723,7 +1724,6 @@ static const int types[] = {
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
- MLX5_CAP_DEV_SHAMPO,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
@@ -1797,6 +1797,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
INIT_LIST_HEAD(&priv->traps);
+ err = mlx5_cmd_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing cmdif SW structs, aborting\n");
+ goto err_cmd_init;
+ }
+
err = mlx5_tout_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
@@ -1842,6 +1848,8 @@ err_pagealloc_init:
err_health_init:
mlx5_tout_cleanup(dev);
err_timeout_init:
+ mlx5_cmd_cleanup(dev);
+err_cmd_init:
debugfs_remove(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
@@ -1864,6 +1872,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_tout_cleanup(dev);
+ mlx5_cmd_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
@@ -1921,9 +1930,9 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
- err = mlx5_thermal_init(dev);
+ err = mlx5_hwmon_dev_register(dev);
if (err)
- dev_err(&pdev->dev, "mlx5_thermal_init failed with error code %d\n", err);
+ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
pci_save_state(pdev);
devlink_register(devlink);
@@ -1955,7 +1964,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_drain_health_wq(dev);
devlink_unregister(devlink);
mlx5_sriov_disable(pdev, false);
- mlx5_thermal_uninit(dev);
+ mlx5_hwmon_dev_unregister(dev);
mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 682d3dc00dd1..124352459c23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -174,10 +174,16 @@ static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode);
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
+int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cmd_enable(struct mlx5_core_dev *dev);
+void mlx5_cmd_disable(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index aa403a5ea34e..1088114e905d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -29,9 +29,9 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct irq_affinity_desc *af_desc,
struct cpu_rmap **rmap);
-int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
- struct mlx5_irq **irqs, struct cpu_rmap **rmap);
-void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
+struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ u16 vecidx, struct cpu_rmap **rmap);
+void mlx5_irq_release_vector(struct mlx5_irq *irq);
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
@@ -39,17 +39,17 @@ int mlx5_irq_get_index(struct mlx5_irq *irq);
struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
-int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs);
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx);
struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
struct irq_affinity_desc *af_desc);
-void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
- int num_irqs);
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
#else
-static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
- struct mlx5_irq **irqs)
+static inline
+struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
+ struct cpumask *used_cpus, u16 vecidx)
{
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct mlx5_irq *
@@ -58,7 +58,9 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev,
- struct mlx5_irq **irqs, int num_irqs) {}
+static inline
+void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
+{
+}
#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index cba2a4afb5fd..653648216730 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -259,8 +259,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int err;
irq = kzalloc(sizeof(*irq), GFP_KERNEL);
- if (!irq)
+ if (!irq || !zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
+ kfree(irq);
return ERR_PTR(-ENOMEM);
+ }
+
if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
/* The vector at index 0 is always statically allocated. If
* dynamic irq is not supported all vectors are statically
@@ -297,11 +300,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
goto err_req_irq;
}
- if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
- mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
- err = -ENOMEM;
- goto err_cpumask;
- }
+
if (af_desc) {
cpumask_copy(irq->mask, &af_desc->mask);
irq_set_affinity_and_hint(irq->map.virq, irq->mask);
@@ -319,8 +318,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
err_xa:
if (af_desc)
irq_update_affinity_hint(irq->map.virq, NULL);
- free_cpumask_var(irq->mask);
-err_cpumask:
free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
@@ -333,6 +330,7 @@ err_irq_rmap:
if (i && pci_msix_can_alloc_dyn(dev->pdev))
pci_msix_free_irq(dev->pdev, irq->map);
err_alloc_irq:
+ free_cpumask_var(irq->mask);
kfree(irq);
return ERR_PTR(err);
}
@@ -432,19 +430,10 @@ static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
return pool ? pool : irq_table->pcif_pool;
}
-/**
- * mlx5_irqs_release - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
- */
-static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
+static void _mlx5_irq_release(struct mlx5_irq *irq)
{
- int i;
-
- for (i = 0; i < nirqs; i++) {
- synchronize_irq(irqs[i]->map.virq);
- mlx5_irq_put(irqs[i]);
- }
+ synchronize_irq(irq->map.virq);
+ mlx5_irq_put(irq);
}
/**
@@ -453,7 +442,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
*/
void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
- mlx5_irqs_release(&ctrl_irq, 1);
+ _mlx5_irq_release(ctrl_irq);
}
/**
@@ -569,53 +558,42 @@ void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
EXPORT_SYMBOL(mlx5_msix_free);
/**
- * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
- * @irqs: IRQs to be released.
- * @nirqs: number of IRQs to be released.
+ * mlx5_irq_release_vector - release one IRQ back to the system.
+ * @irq: the irq to release.
*/
-void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
+void mlx5_irq_release_vector(struct mlx5_irq *irq)
{
- mlx5_irqs_release(irqs, nirqs);
+ _mlx5_irq_release(irq);
}
/**
- * mlx5_irqs_request_vectors - request one or more IRQs for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQs.
- * @cpus: CPUs array for binding the IRQs
- * @nirqs: number of IRQs to request.
- * @irqs: an output array of IRQs pointers.
+ * mlx5_irq_request_vector - request one IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @cpu: CPU to bind the IRQ to.
+ * @vecidx: vector index to request an IRQ for.
* @rmap: pointer to reverse map pointer for completion interrupts
*
* Each IRQ is bound to at most 1 CPU.
- * This function is requests nirqs IRQs, starting from @vecidx.
+ * This function requests one IRQ for the given @vecidx.
*
- * This function returns the number of IRQs requested, (which might be smaller than
- * @nirqs), if successful, or a negative error code in case of an error.
+ * This function returns a pointer to the irq on success, or an error pointer
+ * on failure.
*/
-int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
- struct mlx5_irq **irqs, struct cpu_rmap **rmap)
+struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
+ u16 vecidx, struct cpu_rmap **rmap)
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- struct mlx5_irq *irq;
int offset = 1;
- int i;
if (!pool->xa_num_irqs.max)
offset = 0;
af_desc.is_managed = false;
- for (i = 0; i < nirqs; i++) {
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpus[i], &af_desc.mask);
- irq = mlx5_irq_request(dev, i + offset, &af_desc, rmap);
- if (IS_ERR(irq))
- break;
- irqs[i] = irq;
- }
-
- return i ? i : PTR_ERR(irq);
+ cpumask_clear(&af_desc.mask);
+ cpumask_set_cpu(cpu, &af_desc.mask);
+ return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
}
static struct mlx5_irq_pool *
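A minimal sketch of the per-vector pattern the new API implies, assuming a hypothetical caller that previously filled a CPU array for mlx5_irqs_request_vectors(); only mlx5_irq_request_vector() and mlx5_irq_release_vector() come from the code above:

static struct mlx5_irq *example_request_comp_irq(struct mlx5_core_dev *dev,
						 u16 cpu, u16 vecidx)
{
	struct mlx5_irq *irq;

	irq = mlx5_irq_request_vector(dev, cpu, vecidx, NULL);
	if (IS_ERR(irq))
		return irq;

	/* ... use the vector; on teardown call mlx5_irq_release_vector(irq) */
	return irq;
}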
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 0daeb4b72cca..be70d1f23a5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -271,7 +271,7 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
-static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 8e2abbab05f0..05e148db9889 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -129,7 +129,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
err = auxiliary_device_add(&sf_dev->adev);
if (err) {
- put_device(&sf_dev->adev.dev);
+ auxiliary_device_uninit(&sf_dev->adev);
goto add_err;
}
@@ -167,7 +167,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
if (!max_functions)
return 0;
- base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
+ base_id = mlx5_sf_start_function_id(table->dev);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
@@ -185,7 +185,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
else
mlx5_core_err(table->dev,
- "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n",
+ "SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id);
break;
case MLX5_VHCA_STATE_ACTIVE:
@@ -209,7 +209,7 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
int i;
max_functions = mlx5_sf_max_functions(dev);
- function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ function_id = mlx5_sf_start_function_id(dev);
/* Arm the vhca context as the vhca event notifier */
for (i = 0; i < max_functions; i++) {
err = mlx5_vhca_event_arm(dev, function_id);
@@ -234,7 +234,7 @@ static void mlx5_sf_dev_add_active_work(struct work_struct *work)
int i;
max_functions = mlx5_sf_max_functions(dev);
- function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ function_id = mlx5_sf_start_function_id(dev);
for (i = 0; i < max_functions; i++, function_id++) {
if (table->stop_active_wq)
return;
@@ -299,7 +299,7 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
unsigned int max_sfs;
int err;
- if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev))
+ if (!mlx5_sf_dev_supported(dev))
return;
table = kzalloc(sizeof(*table), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 6a3fa30b2bf2..e34a8f88c518 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -12,7 +12,7 @@
#include "diag/sf_tracepoint.h"
struct mlx5_sf {
- struct devlink_port dl_port;
+ struct mlx5_devlink_port dl_port;
unsigned int port_index;
u32 controller;
u16 id;
@@ -292,11 +292,11 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
if (IS_ERR(sf))
return PTR_ERR(sf);
- err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
- new_attr->controller, new_attr->sfnum);
+ err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE,
+ &sf->dl_port, new_attr->controller, new_attr->sfnum);
if (err)
goto esw_err;
- *dl_port = &sf->dl_port;
+ *dl_port = &sf->dl_port.dl_port;
trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
return 0;
@@ -400,7 +400,7 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink,
goto sf_err;
}
- mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
mlx5_sf_id_erase(table, sf);
mutex_lock(&table->sf_state_lock);
@@ -472,7 +472,7 @@ static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
* arrive. It is safe to destroy all user created SFs.
*/
xa_for_each(&table->port_indices, index, sf) {
- mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
mlx5_sf_id_erase(table, sf);
mlx5_sf_dealloc(table, sf);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 17aa348989cb..1f613320fe07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -9,6 +9,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "diag/sf_tracepoint.h"
+#include "devlink.h"
struct mlx5_sf_hw {
u32 usr_sfnum;
@@ -243,31 +244,61 @@ static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
kfree(hwc->sfs);
}
+static void mlx5_sf_hw_table_res_unregister(struct mlx5_core_dev *dev)
+{
+ devl_resources_unregister(priv_to_devlink(dev));
+}
+
+static int mlx5_sf_hw_table_res_register(struct mlx5_core_dev *dev, u16 max_fn,
+ u16 max_ext_fn)
+{
+ struct devlink_resource_size_params size_params;
+ struct devlink *devlink = priv_to_devlink(dev);
+ int err;
+
+ devlink_resource_size_params_init(&size_params, max_fn, max_fn, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink, "max_local_SFs", max_fn, MLX5_DL_RES_MAX_LOCAL_SFS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP, &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, max_ext_fn, max_ext_fn, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ return devl_resource_register(devlink, "max_external_SFs", max_ext_fn,
+ MLX5_DL_RES_MAX_EXTERNAL_SFS, DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
u16 max_ext_fn = 0;
u16 ext_base_id = 0;
- u16 max_fn = 0;
u16 base_id;
+ u16 max_fn;
int err;
if (!mlx5_vhca_event_supported(dev))
return 0;
- if (mlx5_sf_supported(dev))
- max_fn = mlx5_sf_max_functions(dev);
+ max_fn = mlx5_sf_max_functions(dev);
err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
if (err)
return err;
+ if (mlx5_sf_hw_table_res_register(dev, max_fn, max_ext_fn))
+ mlx5_core_dbg(dev, "failed to register max SFs resources");
+
if (!max_fn && !max_ext_fn)
return 0;
table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
+ if (!table) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
mutex_init(&table->table_lock);
table->dev = dev;
@@ -291,6 +322,8 @@ ext_err:
table_err:
mutex_destroy(&table->table_lock);
kfree(table);
+alloc_err:
+ mlx5_sf_hw_table_res_unregister(dev);
return err;
}
@@ -299,12 +332,14 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
if (!table)
- return;
+ goto res_unregister;
- mutex_destroy(&table->table_lock);
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
+ mutex_destroy(&table->table_lock);
kfree(table);
+res_unregister:
+ mlx5_sf_hw_table_res_unregister(dev);
}
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
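Once registered, the two SF resources above become visible through the standard devlink resource interface; for example (the PCI address is a placeholder):

$ devlink resource show pci/0000:06:00.0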
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 54bb0866ed72..5b83da08692d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1422,7 +1422,6 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
u8 *hw_actions;
- int ret;
hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL);
if (!hw_actions)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 4a5ae86e2b62..4e8527a724f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -52,6 +52,7 @@ struct dr_qp_init_attr {
u32 cqn;
u32 pdn;
u32 max_send_wr;
+ u32 max_send_sge;
struct mlx5_uars_page *uar;
u8 isolate_vl_tc:1;
};
@@ -246,6 +247,37 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
return err == CQ_POLL_ERR ? err : npolled;
}
+static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
+{
+ return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
+ sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
+}
+
+/* Calculate the send WQE size for the specific RC QP with the required functionality */
+static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
+{
+ int update_arg_size;
+ int inl_size = 0;
+ int tot_size;
+ int size;
+
+ update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
+
+ size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg);
+ inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
+ DR_STE_SIZE, 16);
+
+ size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
+
+ size = max(size, update_arg_size);
+
+ tot_size = max(size, inl_size);
+
+ return ALIGN(tot_size, MLX5_SEND_WQE_BB);
+}
+
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
struct dr_qp_init_attr *attr)
{
@@ -253,6 +285,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
struct mlx5_wq_param wqp;
struct mlx5dr_qp *dr_qp;
+ int wqe_size;
int inlen;
void *qpc;
void *in;
@@ -332,6 +365,15 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
if (err)
goto err_in;
dr_qp->uar = attr->uar;
+ wqe_size = dr_qp_calc_rc_send_wqe(attr);
+ dr_qp->max_inline_data = min(wqe_size -
+ (sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_inline_seg)),
+ (2 * MLX5_SEND_WQE_BB -
+ (sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_inline_seg))));
return dr_qp;
@@ -395,8 +437,48 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
MLX5_SEND_WQE_DS;
}
+static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
+ struct dr_data_seg *data_seg, void *wqe)
+{
+ int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_inline_seg);
+ struct mlx5_wqe_inline_seg *seg;
+ int left_space;
+ int inl = 0;
+ void *addr;
+ int len;
+ int idx;
+
+ seg = wqe;
+ wqe += sizeof(*seg);
+ addr = (void *)(unsigned long)(data_seg->addr);
+ len = data_seg->length;
+ inl += len;
+ left_space = MLX5_SEND_WQE_BB - inline_header_size;
+
+ if (likely(len > left_space)) {
+ memcpy(wqe, addr, left_space);
+ len -= left_space;
+ addr += left_space;
+ idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
+ wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+ }
+
+ memcpy(wqe, addr, len);
+
+ if (likely(inl)) {
+ seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+ return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
+ MLX5_SEND_WQE_DS);
+ } else {
+ return 0;
+ }
+}
+
static void
-dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
+ struct mlx5_wqe_ctrl_seg *wq_ctrl,
u64 remote_addr,
u32 rkey,
struct dr_data_seg *data_seg,
@@ -412,15 +494,17 @@ dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
wq_raddr->reserved = 0;
wq_dseg = (void *)(wq_raddr + 1);
+ /* WQE ctrl segment + WQE remote addr segment */
+ *size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
- wq_dseg->byte_count = cpu_to_be32(data_seg->length);
- wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
- wq_dseg->addr = cpu_to_be64(data_seg->addr);
-
- *size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
- sizeof(*wq_dseg) + /* WQE data segment */
- sizeof(*wq_raddr)) / /* WQE remote addr segment */
- MLX5_SEND_WQE_DS;
+ if (data_seg->send_flags & IB_SEND_INLINE) {
+ *size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
+ } else {
+ wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+ wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+ wq_dseg->addr = cpu_to_be64(data_seg->addr);
+ *size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
+ }
}
static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
@@ -451,7 +535,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
switch (opcode) {
case MLX5_OPCODE_RDMA_READ:
case MLX5_OPCODE_RDMA_WRITE:
- dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
+ dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
rkey, data_seg, &size);
break;
case MLX5_OPCODE_FLOW_TBL_ACCESS:
@@ -572,7 +656,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
else
- send_info->write.send_flags = 0;
+ send_info->write.send_flags &= ~IB_SEND_SIGNALED;
}
static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
@@ -596,9 +680,13 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
}
send_ring->pending_wqe++;
+ if (!send_info->write.lkey)
+ send_info->write.send_flags |= IB_SEND_INLINE;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
+ else
+ send_info->write.send_flags &= ~IB_SEND_SIGNALED;
send_ring->pending_wqe++;
send_info->read.length = send_info->write.length;
@@ -608,9 +696,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
send_info->read.lkey = send_ring->sync_mr->mkey;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
- send_info->read.send_flags = IB_SEND_SIGNALED;
+ send_info->read.send_flags |= IB_SEND_SIGNALED;
else
- send_info->read.send_flags = 0;
+ send_info->read.send_flags &= ~IB_SEND_SIGNALED;
}
static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
@@ -1096,8 +1184,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
- err = mlx5_vector2eqn(mdev, vector, &eqn);
+ vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+ err = mlx5_comp_eqn_get(mdev, vector, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
@@ -1257,6 +1345,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->cq->qp = dmn->send_ring->qp;
dmn->info.max_send_wr = QUEUE_SIZE;
+ init_attr.max_send_sge = 1;
dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
DR_STE_SIZE);
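A short worked example of the inline path added above, assuming the usual segment sizes (16-byte ctrl segment, 16-byte raddr segment, 4-byte inline header) with MLX5_SEND_WQE_BB = 64 and DR_STE_SIZE = 64:

/* inline_header_size = 16 + 16 + 4 = 36, so left_space = 64 - 36 = 28.
 * Writing one 64-byte STE inline therefore places 28 bytes in the first
 * basic block and the remaining 36 bytes in the next one, and
 * dr_set_data_inl_seg() returns DIV_ROUND_UP(64 + 4, 16) = 5 data segments.
 */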
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index feb307fb3440..14f6df88b1f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -336,7 +336,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
err = -EINVAL;
mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
- goto free_actions;
+ goto free_actions;
}
is_decap = fte->action.pkt_reformat->reformat_type ==
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c b/drivers/net/ethernet/mellanox/mlx5/core/thermal.c
deleted file mode 100644
index 52199d39657e..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/thermal.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/thermal.h>
-#include <linux/err.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#include "thermal.h"
-
-#define MLX5_THERMAL_POLL_INT_MSEC 1000
-#define MLX5_THERMAL_NUM_TRIPS 0
-#define MLX5_THERMAL_ASIC_SENSOR_INDEX 0
-
-/* Bit string indicating the writeablility of trip points if any */
-#define MLX5_THERMAL_TRIP_MASK (BIT(MLX5_THERMAL_NUM_TRIPS) - 1)
-
-struct mlx5_thermal {
- struct mlx5_core_dev *mdev;
- struct thermal_zone_device *tzdev;
-};
-
-static int mlx5_thermal_get_mtmp_temp(struct mlx5_core_dev *mdev, u32 id, int *p_temp)
-{
- u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
- u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
- int err;
-
- MLX5_SET(mtmp_reg, mtmp_in, sensor_index, id);
-
- err = mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
- mtmp_out, sizeof(mtmp_out),
- MLX5_REG_MTMP, 0, 0);
-
- if (err)
- return err;
-
- *p_temp = MLX5_GET(mtmp_reg, mtmp_out, temperature);
-
- return 0;
-}
-
-static int mlx5_thermal_get_temp(struct thermal_zone_device *tzdev,
- int *p_temp)
-{
- struct mlx5_thermal *thermal = thermal_zone_device_priv(tzdev);
- struct mlx5_core_dev *mdev = thermal->mdev;
- int err;
-
- err = mlx5_thermal_get_mtmp_temp(mdev, MLX5_THERMAL_ASIC_SENSOR_INDEX, p_temp);
-
- if (err)
- return err;
-
- /* The unit of temp returned is in 0.125 C. The thermal
- * framework expects the value in 0.001 C.
- */
- *p_temp *= 125;
-
- return 0;
-}
-
-static struct thermal_zone_device_ops mlx5_thermal_ops = {
- .get_temp = mlx5_thermal_get_temp,
-};
-
-int mlx5_thermal_init(struct mlx5_core_dev *mdev)
-{
- char data[THERMAL_NAME_LENGTH];
- struct mlx5_thermal *thermal;
- int err;
-
- if (!mlx5_core_is_pf(mdev) && !mlx5_core_is_ecpf(mdev))
- return 0;
-
- err = snprintf(data, sizeof(data), "mlx5_%s", dev_name(mdev->device));
- if (err < 0 || err >= sizeof(data)) {
- mlx5_core_err(mdev, "Failed to setup thermal zone name, %d\n", err);
- return -EINVAL;
- }
-
- thermal = kzalloc(sizeof(*thermal), GFP_KERNEL);
- if (!thermal)
- return -ENOMEM;
-
- thermal->mdev = mdev;
- thermal->tzdev = thermal_zone_device_register_with_trips(data,
- NULL,
- MLX5_THERMAL_NUM_TRIPS,
- MLX5_THERMAL_TRIP_MASK,
- thermal,
- &mlx5_thermal_ops,
- NULL, 0, MLX5_THERMAL_POLL_INT_MSEC);
- if (IS_ERR(thermal->tzdev)) {
- err = PTR_ERR(thermal->tzdev);
- mlx5_core_err(mdev, "Failed to register thermal zone device (%s) %d\n", data, err);
- kfree(thermal);
- return err;
- }
-
- mdev->thermal = thermal;
- return 0;
-}
-
-void mlx5_thermal_uninit(struct mlx5_core_dev *mdev)
-{
- if (!mdev->thermal)
- return;
-
- thermal_zone_device_unregister(mdev->thermal->tzdev);
- kfree(mdev->thermal);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/thermal.h b/drivers/net/ethernet/mellanox/mlx5/core/thermal.h
deleted file mode 100644
index 7d752c122192..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/thermal.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
- * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
- */
-#ifndef __MLX5_THERMAL_DRIVER_H
-#define __MLX5_THERMAL_DRIVER_H
-
-#if IS_ENABLED(CONFIG_THERMAL)
-int mlx5_thermal_init(struct mlx5_core_dev *mdev);
-void mlx5_thermal_uninit(struct mlx5_core_dev *mdev);
-#else
-static inline int mlx5_thermal_init(struct mlx5_core_dev *mdev)
-{
- mdev->thermal = NULL;
- return 0;
-}
-
-static inline void mlx5_thermal_uninit(struct mlx5_core_dev *mdev) { }
-#endif
-
-#endif /* __MLX5_THERMAL_DRIVER_H */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index a453b9cd9033..bc94e75a7aeb 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -175,9 +175,6 @@ enum mlxbf_gige_res {
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
-irqreturn_t mlxbf_gige_mdio_handle_phy_interrupt(int irq, void *dev_id);
-void mlxbf_gige_mdio_enable_phy_int(struct mlxbf_gige *priv);
-
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 3ca9fce759ea..71cad6bb6e62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -29,7 +29,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
spectrum_ethtool.o spectrum_policer.o \
- spectrum_pgt.o
+ spectrum_pgt.o spectrum_port_range.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 9dfe7148199f..faa63ea9b83e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1887,6 +1887,46 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
+/* Ignore Action
+ * -------------
+ * The ignore action is used to ignore basic switching functions such as
+ * learning on a per-packet basis.
+ */
+
+#define MLXSW_AFA_IGNORE_CODE 0x0F
+#define MLXSW_AFA_IGNORE_SIZE 1
+
+/* afa_ignore_disable_learning
+ * Disable learning on ingress.
+ */
+MLXSW_ITEM32(afa, ignore, disable_learning, 0x00, 29, 1);
+
+/* afa_ignore_disable_security
+ * Disable security lookup on ingress.
+ * Reserved when Spectrum-1.
+ */
+MLXSW_ITEM32(afa, ignore, disable_security, 0x00, 28, 1);
+
+static void mlxsw_afa_ignore_pack(char *payload, bool disable_learning,
+ bool disable_security)
+{
+ mlxsw_afa_ignore_disable_learning_set(payload, disable_learning);
+ mlxsw_afa_ignore_disable_security_set(payload, disable_security);
+}
+
+int mlxsw_afa_block_append_ignore(struct mlxsw_afa_block *block,
+ bool disable_learning, bool disable_security)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_IGNORE_CODE,
+ MLXSW_AFA_IGNORE_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_ignore_pack(act, disable_learning, disable_security);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_ignore);
+
/* MC Routing Action
* -----------------
* The Multicast router action. Can be used by RMFT_V2 - Router Multicast
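A minimal sketch of a hypothetical caller of the new ignore action; only mlxsw_afa_block_append_ignore() itself comes from the code above, the wrapper name is made up:

static int example_append_ignore(struct mlxsw_afa_block *block)
{
	/* Skip learning and the security lookup for packets hitting this rule. */
	return mlxsw_afa_block_append_ignore(block, true, true);
}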
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index db58037be46e..0ead3a212de8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -89,6 +89,8 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_ignore(struct mlxsw_afa_block *block,
+ bool disable_learning, bool disable_security);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 973de2adc943..70f9b5e85a26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -43,6 +43,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1),
+ MLXSW_AFK_ELEMENT_INFO_U32(L4_PORT_RANGE, 0x40, 1, 16),
};
struct mlxsw_afk {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 65a4abadc7db..2eac7582c31a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -36,6 +36,7 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_FDB_MISS,
+ MLXSW_AFK_ELEMENT_L4_PORT_RANGE,
MLXSW_AFK_ELEMENT_MAX,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 0107cbc32fc7..d637c0348fa1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -32,6 +32,7 @@ struct mlxsw_env {
const struct mlxsw_bus_info *bus_info;
u8 max_module_count; /* Maximum number of modules per-slot. */
u8 num_of_slots; /* Including the main board. */
+ u8 max_eeprom_len; /* Maximum module EEPROM transaction length. */
struct mutex line_cards_lock; /* Protects line cards. */
struct mlxsw_env_line_card *line_cards[];
};
@@ -111,7 +112,7 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id,
if (err)
return err;
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, id,
MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl);
@@ -146,6 +147,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
int module, u16 offset, u16 size, void *data,
bool qsfp, unsigned int *p_read_size)
{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
char mcia_pl[MLXSW_REG_MCIA_LEN];
char *eeprom_tmp;
u16 i2c_addr;
@@ -153,11 +155,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
int status;
int err;
- /* MCIA register accepts buffer size <= 48. Page of size 128 should be
- * read by chunks of size 48, 48, 32. Align the size of the last chunk
- * to avoid reading after the end of the page.
- */
- size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);
+ size = min_t(u16, size, mlxsw_env->max_eeprom_len);
if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&
offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
@@ -188,7 +186,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
}
}
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page, offset, size,
i2c_addr);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
@@ -266,12 +264,12 @@ mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index,
page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM;
else
page = MLXSW_REG_MCIA_TH_PAGE_NUM;
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page,
MLXSW_REG_MCIA_TH_PAGE_OFF + off,
MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_LOW);
} else {
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module,
MLXSW_REG_MCIA_PAGE0_LO,
off, MLXSW_REG_MCIA_TH_ITEM_SIZE,
MLXSW_REG_MCIA_I2C_ADDR_HIGH);
@@ -489,9 +487,9 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
u8 size;
size = min_t(u8, page->length - bytes_read,
- MLXSW_REG_MCIA_EEPROM_SIZE);
+ mlxsw_env->max_eeprom_len);
- mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page,
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page,
device_addr + bytes_read, size,
page->i2c_address);
mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
@@ -1359,6 +1357,26 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
+static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+{
+ char mcam_pl[MLXSW_REG_MCAM_LEN];
+ bool mcia_128b_supported;
+ int err;
+
+ mlxsw_reg_mcam_pack(mcam_pl,
+ MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
+ err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
+
+ mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
+
+ return 0;
+}
+
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *bus_info,
struct mlxsw_env **p_env)
@@ -1427,10 +1445,15 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
+ err = mlxsw_env_max_module_eeprom_len_query(env);
+ if (err)
+ goto err_eeprom_len_query;
+
env->line_cards[0]->active = true;
return 0;
+err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 17160e867bef..ae556ddd7624 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2790,6 +2790,78 @@ static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info)
mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info);
}
+/* PPRR - Policy-Engine Port Range Register
+ * ----------------------------------------
+ * This register is used for configuring port range identification.
+ */
+#define MLXSW_REG_PPRR_ID 0x3008
+#define MLXSW_REG_PPRR_LEN 0x14
+
+MLXSW_REG_DEFINE(pprr, MLXSW_REG_PPRR_ID, MLXSW_REG_PPRR_LEN);
+
+/* reg_pprr_ipv4
+ * Apply port range register to IPv4 packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, ipv4, 0x00, 31, 1);
+
+/* reg_pprr_ipv6
+ * Apply port range register to IPv6 packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, ipv6, 0x00, 30, 1);
+
+/* reg_pprr_src
+ * Apply port range register to source L4 ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, src, 0x00, 29, 1);
+
+/* reg_pprr_dst
+ * Apply port range register to destination L4 ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, dst, 0x00, 28, 1);
+
+/* reg_pprr_tcp
+ * Apply port range register to TCP packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, tcp, 0x00, 27, 1);
+
+/* reg_pprr_udp
+ * Apply port range register to UDP packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, udp, 0x00, 26, 1);
+
+/* reg_pprr_register_index
+ * Index of Port Range Register being accessed.
+ * Range is 0..cap_max_acl_l4_port_range-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pprr, register_index, 0x00, 0, 8);
+
+/* reg_pprr_port_range_min
+ * Minimum port range for comparison.
+ * Match is defined as:
+ * port_range_min <= packet_port <= port_range_max.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, port_range_min, 0x04, 16, 16);
+
+/* reg_pprr_port_range_max
+ * Maximum port range for comparison.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pprr, port_range_max, 0x04, 0, 16);
+
+static inline void mlxsw_reg_pprr_pack(char *payload, u8 register_index)
+{
+ MLXSW_REG_ZERO(pprr, payload);
+ mlxsw_reg_pprr_register_index_set(payload, register_index);
+}
+
/* PPBS - Policy-Engine Policy Based Switching Register
* ----------------------------------------------------
* This register retrieves and sets Policy Based Switching Table entries.
@@ -9559,18 +9631,10 @@ static inline void mlxsw_reg_mtbr_temp_unpack(char *payload, int rec_ind,
*/
#define MLXSW_REG_MCIA_ID 0x9014
-#define MLXSW_REG_MCIA_LEN 0x40
+#define MLXSW_REG_MCIA_LEN 0x94
MLXSW_REG_DEFINE(mcia, MLXSW_REG_MCIA_ID, MLXSW_REG_MCIA_LEN);
-/* reg_mcia_l
- * Lock bit. Setting this bit will lock the access to the specific
- * cable. Used for updating a full page in a cable EPROM. Any access
- * other then subsequence writes will fail while the port is locked.
- * Access: RW
- */
-MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1);
-
/* reg_mcia_module
* Module number.
* Access: Index
@@ -9635,7 +9699,6 @@ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16);
#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256
#define MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH 128
-#define MLXSW_REG_MCIA_EEPROM_SIZE 48
#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50
#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51
#define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0
@@ -9672,7 +9735,7 @@ enum mlxsw_reg_mcia_eeprom_module_info {
* Bytes to read/write.
* Access: RW
*/
-MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
+MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, 128);
/* This is used to access the optional upper pages (1-3) in the QSFP+
* memory map. Page 1 is available on offset 256 through 383, page 2 -
@@ -9683,14 +9746,12 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE);
MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1)
static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module,
- u8 lock, u8 page_number,
- u16 device_addr, u8 size,
+ u8 page_number, u16 device_addr, u8 size,
u8 i2c_device_addr)
{
MLXSW_REG_ZERO(mcia, payload);
mlxsw_reg_mcia_slot_set(payload, slot_index);
mlxsw_reg_mcia_module_set(payload, module);
- mlxsw_reg_mcia_l_set(payload, lock);
mlxsw_reg_mcia_page_number_set(payload, page_number);
mlxsw_reg_mcia_device_address_set(payload, device_addr);
mlxsw_reg_mcia_size_set(payload, size);
@@ -10500,6 +10561,79 @@ static inline void mlxsw_reg_mcda_pack(char *payload, u32 update_handle,
mlxsw_reg_mcda_data_set(payload, i, *(u32 *) &data[i * 4]);
}
+/* MCAM - Management Capabilities Mask Register
+ * --------------------------------------------
+ * Reports the device supported management features.
+ */
+#define MLXSW_REG_MCAM_ID 0x907F
+#define MLXSW_REG_MCAM_LEN 0x48
+
+MLXSW_REG_DEFINE(mcam, MLXSW_REG_MCAM_ID, MLXSW_REG_MCAM_LEN);
+
+enum mlxsw_reg_mcam_feature_group {
+ /* Enhanced features. */
+ MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES,
+};
+
+/* reg_mcam_feature_group
+ * Feature list mask index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcam, feature_group, 0x00, 16, 8);
+
+enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
+ /* If set, MCIA supports 128-byte payloads. Otherwise, 48 bytes. */
+ MLXSW_REG_MCAM_MCIA_128B = 34,
+};
+
+#define MLXSW_REG_BYTES_PER_DWORD 0x4
+
+/* reg_mcam_mng_feature_cap_mask
+ * Supported port's enhanced features.
+ * Based on feature_group index.
+ * When bit is set, the feature is supported in the device.
+ * Access: RO
+ */
+#define MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(_dw_num, _offset) \
+ MLXSW_ITEM_BIT_ARRAY(reg, mcam, mng_feature_cap_mask_dw##_dw_num, \
+ _offset, MLXSW_REG_BYTES_PER_DWORD, 1)
+
+/* The bit layout of the 'mng_feature_cap_mask' field is not the same as that
+ * of mask fields in other registers: there, bit #0 is usually the first bit
+ * of the last dword, whereas in the MCAM register the first dword holds bits
+ * #0-#31 and so on. Accessing the bits is therefore simpler with a bit array
+ * per dword. Declare each dword of the 'mng_feature_cap_mask' field
+ * separately.
+ */
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(0, 0x28);
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(1, 0x2C);
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(2, 0x30);
+MLXSW_REG_MCAM_MNG_FEATURE_CAP_MASK_DWORD(3, 0x34);
+
+static inline void
+mlxsw_reg_mcam_pack(char *payload, enum mlxsw_reg_mcam_feature_group feat_group)
+{
+ MLXSW_REG_ZERO(mcam, payload);
+ mlxsw_reg_mcam_feature_group_set(payload, feat_group);
+}
+
+static inline void
+mlxsw_reg_mcam_unpack(char *payload,
+ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits bit,
+ bool *p_mng_feature_cap_val)
+{
+ int offset = bit % (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE);
+ int dword = bit / (MLXSW_REG_BYTES_PER_DWORD * BITS_PER_BYTE);
+ u8 (*getters[])(const char *, u16) = {
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw0_get,
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw1_get,
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw2_get,
+ mlxsw_reg_mcam_mng_feature_cap_mask_dw3_get,
+ };
+
+ if (!WARN_ON_ONCE(dword >= ARRAY_SIZE(getters)))
+ *p_mng_feature_cap_val = getters[dword](payload, offset);
+}
+
/* MPSC - Monitoring Packet Sampling Configuration Register
* --------------------------------------------------------
* MPSC Register is used to configure the Packet Sampling mechanism.
@@ -12810,6 +12944,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pacl),
MLXSW_REG(pagt),
MLXSW_REG(ptar),
+ MLXSW_REG(pprr),
MLXSW_REG(ppbs),
MLXSW_REG(prcr),
MLXSW_REG(pefa),
@@ -12892,10 +13027,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcion),
MLXSW_REG(mtpps),
MLXSW_REG(mtutc),
- MLXSW_REG(mpsc),
MLXSW_REG(mcqi),
MLXSW_REG(mcc),
MLXSW_REG(mcda),
+ MLXSW_REG(mcam),
+ MLXSW_REG(mpsc),
MLXSW_REG(mgpc),
MLXSW_REG(mprs),
MLXSW_REG(mogcr),
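A minimal sketch of programming one port range register with the PPRR helpers added above, assuming a hypothetical Spectrum caller; only the pprr pack/set helpers and mlxsw_reg_write() are existing interfaces, the wrapper name and port values are made up:

static int example_pprr_configure(struct mlxsw_core *mlxsw_core, u8 register_index)
{
	char pprr_pl[MLXSW_REG_PPRR_LEN];

	/* Match destination TCP/UDP ports 1000-2000 on IPv4 and IPv6. */
	mlxsw_reg_pprr_pack(pprr_pl, register_index);
	mlxsw_reg_pprr_ipv4_set(pprr_pl, true);
	mlxsw_reg_pprr_ipv6_set(pprr_pl, true);
	mlxsw_reg_pprr_dst_set(pprr_pl, true);
	mlxsw_reg_pprr_tcp_set(pprr_pl, true);
	mlxsw_reg_pprr_udp_set(pprr_pl, true);
	mlxsw_reg_pprr_port_range_min_set(pprr_pl, 1000);
	mlxsw_reg_pprr_port_range_max_set(pprr_pl, 2000);

	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pprr), pprr_pl);
}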
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 19ae0d1c74a8..89dd2777ec4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -39,6 +39,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_FLEX_KEYS,
MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
+ MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE,
MLXSW_RES_ID_ACL_MAX_ERPT_BANKS,
MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE,
MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID,
@@ -99,6 +100,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
[MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
[MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
+ [MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE] = 0x2920,
[MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940,
[MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941,
[MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 25a01dafde1b..9dbd5edff0b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1132,8 +1132,8 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}
-static int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid)
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -3188,6 +3188,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_nve_init;
}
+ err = mlxsw_sp_port_range_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
+ goto err_port_range_init;
+ }
+
err = mlxsw_sp_acl_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
@@ -3280,6 +3286,8 @@ err_ptp_clock_init:
err_router_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
+ mlxsw_sp_port_range_fini(mlxsw_sp);
+err_port_range_init:
mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
@@ -3462,6 +3470,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
}
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
+ mlxsw_sp_port_range_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
@@ -3730,6 +3739,26 @@ static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
&size_params);
}
+static int
+mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u64 max;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
+ return -EIO;
+
+ max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
+ devlink_resource_size_params_init(&size_params, max, max, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, "port_range_registers", max,
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
int err;
@@ -3758,8 +3787,13 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rifs_register;
+ err = mlxsw_sp_resources_port_range_register(mlxsw_core);
+ if (err)
+ goto err_resources_port_range_register;
+
return 0;
+err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
@@ -3797,8 +3831,13 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rifs_register;
+ err = mlxsw_sp_resources_port_range_register(mlxsw_core);
+ if (err)
+ goto err_resources_port_range_register;
+
return 0;
+err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
@@ -4073,23 +4112,6 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
return (struct mlxsw_sp_port *)priv.data;
}
-struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
-{
- struct mlxsw_sp_port *mlxsw_sp_port;
-
- rcu_read_lock();
- mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
- if (mlxsw_sp_port)
- dev_hold(mlxsw_sp_port->dev);
- rcu_read_unlock();
- return mlxsw_sp_port;
-}
-
-void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- dev_put(mlxsw_sp_port->dev);
-}
-
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
char mprs_pl[MLXSW_REG_MPRS_LEN];
@@ -4298,6 +4320,88 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
+static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *upper_dev;
+ struct net_device *master;
+ struct list_head *iter;
+ int done = 0;
+ int err;
+
+ master = netdev_master_upper_dev_get(lag_dev);
+ if (master && netif_is_bridge_master(master)) {
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
+ extack);
+ if (err)
+ return err;
+ }
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ master = netdev_master_upper_dev_get(upper_dev);
+ if (master && netif_is_bridge_master(master)) {
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ upper_dev, master,
+ extack);
+ if (err)
+ goto err_port_bridge_join;
+ }
+
+ ++done;
+ }
+
+ return 0;
+
+err_port_bridge_join:
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ master = netdev_master_upper_dev_get(upper_dev);
+ if (!master || !netif_is_bridge_master(master))
+ continue;
+
+ if (!done--)
+ break;
+
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
+ }
+
+ master = netdev_master_upper_dev_get(lag_dev);
+ if (master && netif_is_bridge_master(master))
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
+
+ return err;
+}
+
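The error path of mlxsw_sp_lag_uppers_bridge_join() above counts successful joins in 'done' so that, on a mid-loop failure, only the bridges that were actually joined get left again on rollback. A generic sketch of that unwind-exactly-what-succeeded pattern, where step() and unstep() are placeholder operations rather than driver functions:

#include <stdio.h>

static int step(int i)
{
	return i == 3 ? -1 : 0;	/* pretend the fourth step fails */
}

static void unstep(int i)
{
	printf("undo step %d\n", i);
}

static int do_all(int n)
{
	int done = 0;
	int err = 0;
	int i;

	for (i = 0; i < n; i++) {
		err = step(i);
		if (err)
			break;
		done++;
	}
	if (!err)
		return 0;

	/* Roll back only the iterations that completed. */
	for (i = 0; i < n && done--; i++)
		unstep(i);
	return err;
}

int main(void)
{
	printf("do_all: %d\n", do_all(5));
	return 0;
}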
+static void
+mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct net_device *upper_dev;
+ struct net_device *master;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ master = netdev_master_upper_dev_get(upper_dev);
+ if (!master)
+ continue;
+
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
+ }
+
+ master = netdev_master_upper_dev_get(lag_dev);
+ if (master)
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
+}
+
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev,
struct netlink_ext_ack *extack)
@@ -4322,6 +4426,12 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
if (err)
return err;
+
+ err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
+ extack);
+ if (err)
+ goto err_lag_uppers_bridge_join;
+
err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
if (err)
goto err_col_port_add;
@@ -4342,8 +4452,14 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_router_join;
+ err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
+ if (err)
+ goto err_replay;
+
return 0;
+err_replay:
+ mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
lag->ref_count--;
mlxsw_sp_port->lagged = 0;
@@ -4351,6 +4467,8 @@ err_router_join:
mlxsw_sp_port->local_port);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
+ mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
+err_lag_uppers_bridge_join:
if (!lag->ref_count)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
return err;
@@ -4600,9 +4718,62 @@ static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
return true;
}
+static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
+ struct net_device *dev)
+{
+ return upper_dev == netdev_master_upper_dev_get(dev);
+}
+
+static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event, void *ptr,
+ bool process_foreign);
+
+static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
+ struct netdev_notifier_changeupper_info info = {
+ .info = {
+ .dev = dev,
+ .extack = extack,
+ },
+ .master = mlxsw_sp_netdev_is_master(upper_dev, dev),
+ .upper_dev = upper_dev,
+ .linking = true,
+
+ /* upper_info is relevant for LAG devices. But we would
+ * only need this if LAG were a valid upper above
+ * another upper (e.g. a bridge that is a member of a
+ * LAG), and that is never a valid configuration. So we
+ * can keep this as NULL.
+ */
+ .upper_info = NULL,
+ };
+
+ err = __mlxsw_sp_netdevice_event(mlxsw_sp,
+ NETDEV_PRECHANGEUPPER,
+ &info, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
struct net_device *dev,
- unsigned long event, void *ptr)
+ unsigned long event, void *ptr,
+ bool replay_deslavement)
{
struct netdev_notifier_changeupper_info *info;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -4640,8 +4811,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
- return -EINVAL;
+ err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
+ upper_dev,
+ extack);
+ if (err)
+ return err;
}
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
@@ -4656,11 +4830,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
@@ -4707,15 +4876,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
+ if (info->linking) {
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
lower_dev,
upper_dev,
extack);
- else
+ } else {
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
lower_dev,
upper_dev);
+ if (!replay_deslavement)
+ break;
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
+ lower_dev);
+ }
} else if (netif_is_lag_master(upper_dev)) {
if (info->linking) {
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
@@ -4724,6 +4898,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
+ dev);
}
} else if (netif_is_ovs_master(upper_dev)) {
if (info->linking)
@@ -4776,13 +4952,15 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
struct net_device *port_dev,
- unsigned long event, void *ptr)
+ unsigned long event, void *ptr,
+ bool replay_deslavement)
{
switch (event) {
case NETDEV_PRECHANGEUPPER:
case NETDEV_CHANGEUPPER:
return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
- event, ptr);
+ event, ptr,
+ replay_deslavement);
case NETDEV_CHANGELOWERSTATE:
return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
ptr);
@@ -4791,6 +4969,30 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
return 0;
}
+/* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
+ * to do any per-LAG / per-LAG-upper processing.
+ */
+static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
+ unsigned long event,
+ void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (info->linking)
+ break;
+ if (netif_is_bridge_master(info->upper_dev))
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
+ break;
+ }
+ return 0;
+}
+
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
unsigned long event, void *ptr)
{
@@ -4801,19 +5003,19 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
- ptr);
+ ptr, false);
if (ret)
return ret;
}
}
- return 0;
+ return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
}
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
struct net_device *dev,
unsigned long event, void *ptr,
- u16 vid)
+ u16 vid, bool replay_deslavement)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -4844,27 +5046,30 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
- return -EINVAL;
- }
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
+ err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
+ upper_dev,
+ extack);
+ if (err)
+ return err;
}
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
+ if (info->linking) {
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
vlan_dev,
upper_dev,
extack);
- else
+ } else {
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
upper_dev);
+ if (!replay_deslavement)
+ break;
+ mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
+ vlan_dev);
+ }
} else if (netif_is_macvlan(upper_dev)) {
if (!info->linking)
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
@@ -4888,26 +5093,26 @@ static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
event, ptr,
- vid);
+ vid, false);
if (ret)
return ret;
}
}
- return 0;
+ return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
}
-static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
+static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev,
struct net_device *br_dev,
unsigned long event, void *ptr,
- u16 vid)
+ u16 vid, bool process_foreign)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
- if (!mlxsw_sp)
+ if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
return 0;
extack = netdev_notifier_info_to_extack(&info->info);
@@ -4920,13 +5125,6 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EOPNOTSUPP;
}
- if (!info->linking)
- break;
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4940,36 +5138,42 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
return 0;
}
-static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
- unsigned long event, void *ptr)
+static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev,
+ unsigned long event, void *ptr,
+ bool process_foreign)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
- event, ptr, vid);
+ event, ptr, vid,
+ true);
else if (netif_is_lag_master(real_dev))
return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
real_dev, event,
ptr, vid);
else if (netif_is_bridge_master(real_dev))
- return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
- event, ptr, vid);
+ return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
+ real_dev, event,
+ ptr, vid,
+ process_foreign);
return 0;
}
-static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
- unsigned long event, void *ptr)
+static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ unsigned long event, void *ptr,
+ bool process_foreign)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
u16 proto;
- if (!mlxsw_sp)
+ if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
return 0;
extack = netdev_notifier_info_to_extack(&info->info);
@@ -4997,11 +5201,6 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
return -EOPNOTSUPP;
}
- if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -5107,35 +5306,48 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event, void *ptr,
+ bool process_foreign)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mlxsw_sp_span_entry *span_entry;
- struct mlxsw_sp *mlxsw_sp;
int err = 0;
- mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
if (event == NETDEV_UNREGISTER) {
span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
if (span_entry)
mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
}
- mlxsw_sp_span_respin(mlxsw_sp);
if (netif_is_vxlan(dev))
err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
- err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
+ err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
else if (netif_is_lag_master(dev))
err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
else if (is_vlan_dev(dev))
- err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+ err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
+ process_foreign);
else if (netif_is_bridge_master(dev))
- err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
+ process_foreign);
else if (netif_is_macvlan(dev))
err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
+ return err;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ mlxsw_sp_span_respin(mlxsw_sp);
+ err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
+
return notifier_from_errno(err);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 231e364cbb7c..02ca2871b6f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -69,6 +69,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
MLXSW_SP_RESOURCE_RIFS,
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
};
struct mlxsw_sp_port;
@@ -175,6 +176,7 @@ struct mlxsw_sp {
struct mlxsw_sp_acl *acl;
struct mlxsw_sp_fid_core *fid_core;
struct mlxsw_sp_policer_core *policer_core;
+ struct mlxsw_sp_port_range_core *pr_core;
struct mlxsw_sp_kvdl *kvdl;
struct mlxsw_sp_nve *nve;
struct notifier_block netdevice_nb;
@@ -698,6 +700,8 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
@@ -716,8 +720,6 @@ int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
-struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
-void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp);
@@ -865,9 +867,13 @@ struct mlxsw_sp_acl_rule_info {
egress_bind_blocker:1,
counter_valid:1,
policer_index_valid:1,
- ipv6_valid:1;
+ ipv6_valid:1,
+ src_port_range_reg_valid:1,
+ dst_port_range_reg_valid:1;
unsigned int counter_index;
u16 policer_index;
+ u8 src_port_range_reg_index;
+ u8 dst_port_range_reg_index;
struct {
u32 prev_val;
enum mlxsw_sp_acl_mangle_field prev_field;
@@ -992,7 +998,8 @@ void mlxsw_sp_acl_ruleset_prio_get(struct mlxsw_sp_acl_ruleset *ruleset,
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
struct mlxsw_afa_block *afa_block);
-void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
unsigned int priority);
@@ -1043,6 +1050,9 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u16 fid, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool disable_learning, bool disable_security);
int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_flow_block *block,
@@ -1261,7 +1271,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
/* spectrum_fid.c */
-bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
@@ -1394,10 +1403,6 @@ void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
-/* spectrum_nve_vxlan.c */
-int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
-
/* spectrum_trap.c */
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1484,4 +1489,18 @@ int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_port_range.c */
+struct mlxsw_sp_port_range {
+ u16 min;
+ u16 max;
+ u8 source:1; /* Source or destination */
+};
+
+int mlxsw_sp_port_range_reg_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range,
+ struct netlink_ext_ack *extack,
+ u8 *p_prr_index);
+void mlxsw_sp_port_range_reg_put(struct mlxsw_sp *mlxsw_sp, u8 prr_index);
+int mlxsw_sp_port_range_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_port_range_fini(struct mlxsw_sp *mlxsw_sp);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
index 3a636f753607..dfcdd37e797b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -90,7 +90,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
err_entry_add:
err_rulei_commit:
err_rulei_act_continue:
- mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rulei);
err_rulei_create:
mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
return err;
@@ -105,7 +105,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ctcam_entry_del(mlxsw_sp, &region->cregion,
&region->catchall.cchunk,
&region->catchall.centry);
- mlxsw_sp_acl_rulei_destroy(rulei);
+ mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rulei);
mlxsw_sp_acl_ctcam_chunk_fini(&region->catchall.cchunk);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 0423ac262d89..7c59c8a13584 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -339,10 +339,17 @@ err_afa_block_create:
return ERR_PTR(err);
}
-void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei)
{
if (rulei->action_created)
mlxsw_afa_block_destroy(rulei->act_block);
+ if (rulei->src_port_range_reg_valid)
+ mlxsw_sp_port_range_reg_put(mlxsw_sp,
+ rulei->src_port_range_reg_index);
+ if (rulei->dst_port_range_reg_valid)
+ mlxsw_sp_port_range_reg_put(mlxsw_sp,
+ rulei->dst_port_range_reg_index);
kfree(rulei);
}
@@ -768,6 +775,15 @@ int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
+int mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool disable_learning, bool disable_security)
+{
+ return mlxsw_afa_block_append_ignore(rulei->act_block,
+ disable_learning,
+ disable_security);
+}
+
int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_flow_block *block,
@@ -834,7 +850,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
- mlxsw_sp_acl_rulei_destroy(rule->rulei);
+ mlxsw_sp_acl_rulei_destroy(mlxsw_sp, rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index ae2d6f12b799..cb746a43b24b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -31,12 +31,14 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_0_31, 0x00, 4),
+ MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 16, 16),
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
};
@@ -205,6 +207,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_0[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x04, 16, 9), /* TCP_CONTROL + TCP_ECN */
+ MLXSW_AFK_ELEMENT_INST_U32(L4_PORT_RANGE, 0x04, 0, 16),
};
static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index b6ee2d658b0c..9df098474743 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -137,16 +137,6 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
-bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
-{
- enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY;
- struct mlxsw_sp_fid_family *fid_family;
-
- fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type];
-
- return fid_family->start_index == fid_index;
-}
-
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 72917f09e806..9fd1ca079258 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -160,6 +160,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
*/
rulei->egress_bind_blocker = 1;
+ /* Ignore learning and security lookup as redirection
+ * using ingress filters happens before the bridge.
+ */
+ err = mlxsw_sp_acl_rulei_act_ignore(mlxsw_sp, rulei,
+ true, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append ignore action");
+ return err;
+ }
+
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
@@ -418,6 +428,68 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int
+mlxsw_sp_flower_parse_ports_range(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f, u8 ip_proto)
+{
+ const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_ports_range match;
+ u32 key_mask_value = 0;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE))
+ return 0;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports_range(rule, &match);
+
+ if (match.mask->tp_min.src) {
+ struct mlxsw_sp_port_range range = {
+ .min = ntohs(match.key->tp_min.src),
+ .max = ntohs(match.key->tp_max.src),
+ .source = true,
+ };
+ u8 prr_index;
+ int err;
+
+ err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
+ f->common.extack, &prr_index);
+ if (err)
+ return err;
+
+ rulei->src_port_range_reg_index = prr_index;
+ rulei->src_port_range_reg_valid = true;
+ key_mask_value |= BIT(prr_index);
+ }
+
+ if (match.mask->tp_min.dst) {
+ struct mlxsw_sp_port_range range = {
+ .min = ntohs(match.key->tp_min.dst),
+ .max = ntohs(match.key->tp_max.dst),
+ };
+ u8 prr_index;
+ int err;
+
+ err = mlxsw_sp_port_range_reg_get(mlxsw_sp, &range,
+ f->common.extack, &prr_index);
+ if (err)
+ return err;
+
+ rulei->dst_port_range_reg_index = prr_index;
+ rulei->dst_port_range_reg_valid = true;
+ key_mask_value |= BIT(prr_index);
+ }
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_L4_PORT_RANGE,
+ key_mask_value, key_mask_value);
+
+ return 0;
+}
+
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f,
@@ -496,16 +568,17 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
int err;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_META) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_TCP) |
- BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
@@ -604,6 +677,11 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
if (err)
return err;
+
+ err = mlxsw_sp_flower_parse_ports_range(mlxsw_sp, rulei, f, ip_proto);
+ if (err)
+ return err;
+
err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index d2b57a045aa4..5479a1c19d2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -989,6 +989,9 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
int nve_ifindex;
__be32 vni;
+ /* Necessary for __dev_get_by_index() below. */
+ ASSERT_RTNL();
+
mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index);
@@ -997,15 +1000,13 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_vni(fid, &vni)))
goto out;
- nve_dev = dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
+ nve_dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
if (!nve_dev)
goto out;
mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);
- dev_put(nve_dev);
-
out:
mlxsw_sp_fid_vni_clear(fid);
mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
new file mode 100644
index 000000000000..2d193de12be6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_port_range.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/bits.h>
+#include <linux/netlink.h>
+#include <linux/refcount.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_port_range_reg {
+ struct mlxsw_sp_port_range range;
+ refcount_t refcount;
+ u32 index;
+};
+
+struct mlxsw_sp_port_range_core {
+ struct xarray prr_xa;
+ struct xa_limit prr_ids;
+ atomic_t prr_count;
+};
+
+static int
+mlxsw_sp_port_range_reg_configure(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range_reg *prr)
+{
+ char pprr_pl[MLXSW_REG_PPRR_LEN];
+
+ /* We do not care whether the packet is IPv4 or IPv6 and whether it is
+ * TCP or UDP, so set all four fields.
+ */
+ mlxsw_reg_pprr_pack(pprr_pl, prr->index);
+ mlxsw_reg_pprr_ipv4_set(pprr_pl, true);
+ mlxsw_reg_pprr_ipv6_set(pprr_pl, true);
+ mlxsw_reg_pprr_src_set(pprr_pl, prr->range.source);
+ mlxsw_reg_pprr_dst_set(pprr_pl, !prr->range.source);
+ mlxsw_reg_pprr_tcp_set(pprr_pl, true);
+ mlxsw_reg_pprr_udp_set(pprr_pl, true);
+ mlxsw_reg_pprr_port_range_min_set(pprr_pl, prr->range.min);
+ mlxsw_reg_pprr_port_range_max_set(pprr_pl, prr->range.max);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pprr), pprr_pl);
+}
+
+static struct mlxsw_sp_port_range_reg *
+mlxsw_sp_port_range_reg_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+ struct mlxsw_sp_port_range_reg *prr;
+ int err;
+
+ prr = kzalloc(sizeof(*prr), GFP_KERNEL);
+ if (!prr)
+ return ERR_PTR(-ENOMEM);
+
+ prr->range = *range;
+ refcount_set(&prr->refcount, 1);
+
+ err = xa_alloc(&pr_core->prr_xa, &prr->index, prr, pr_core->prr_ids,
+ GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of port range registers");
+ goto err_xa_alloc;
+ }
+
+ err = mlxsw_sp_port_range_reg_configure(mlxsw_sp, prr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to configure port range register");
+ goto err_reg_configure;
+ }
+
+ atomic_inc(&pr_core->prr_count);
+
+ return prr;
+
+err_reg_configure:
+ xa_erase(&pr_core->prr_xa, prr->index);
+err_xa_alloc:
+ kfree(prr);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_port_range_reg_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_port_range_reg *prr)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+
+ atomic_dec(&pr_core->prr_count);
+ xa_erase(&pr_core->prr_xa, prr->index);
+ kfree(prr);
+}
+
+static struct mlxsw_sp_port_range_reg *
+mlxsw_sp_port_range_reg_find(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+ struct mlxsw_sp_port_range_reg *prr;
+ unsigned long index;
+
+ xa_for_each(&pr_core->prr_xa, index, prr) {
+ if (prr->range.min == range->min &&
+ prr->range.max == range->max &&
+ prr->range.source == range->source)
+ return prr;
+ }
+
+ return NULL;
+}
+
+int mlxsw_sp_port_range_reg_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_port_range *range,
+ struct netlink_ext_ack *extack,
+ u8 *p_prr_index)
+{
+ struct mlxsw_sp_port_range_reg *prr;
+
+ prr = mlxsw_sp_port_range_reg_find(mlxsw_sp, range);
+ if (prr) {
+ refcount_inc(&prr->refcount);
+ *p_prr_index = prr->index;
+ return 0;
+ }
+
+ prr = mlxsw_sp_port_range_reg_create(mlxsw_sp, range, extack);
+ if (IS_ERR(prr))
+ return PTR_ERR(prr);
+
+ *p_prr_index = prr->index;
+
+ return 0;
+}
+
+void mlxsw_sp_port_range_reg_put(struct mlxsw_sp *mlxsw_sp, u8 prr_index)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+ struct mlxsw_sp_port_range_reg *prr;
+
+ prr = xa_load(&pr_core->prr_xa, prr_index);
+ if (WARN_ON(!prr))
+ return;
+
+ if (!refcount_dec_and_test(&prr->refcount))
+ return;
+
+ mlxsw_sp_port_range_reg_destroy(mlxsw_sp, prr);
+}
+
+static u64 mlxsw_sp_port_range_reg_occ_get(void *priv)
+{
+ struct mlxsw_sp_port_range_core *pr_core = priv;
+
+ return atomic_read(&pr_core->prr_count);
+}
+
+int mlxsw_sp_port_range_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_range_core *pr_core;
+ struct mlxsw_core *core = mlxsw_sp->core;
+ u64 max;
+
+ if (!MLXSW_CORE_RES_VALID(core, ACL_MAX_L4_PORT_RANGE))
+ return -EIO;
+ max = MLXSW_CORE_RES_GET(core, ACL_MAX_L4_PORT_RANGE);
+
+ /* Each port range register is represented by a single bit in the
+ * two-byte "l4_port_range" ACL key element.
+ */
+ WARN_ON(max > BITS_PER_BYTE * sizeof(u16));
+
+ pr_core = kzalloc(sizeof(*mlxsw_sp->pr_core), GFP_KERNEL);
+ if (!pr_core)
+ return -ENOMEM;
+ mlxsw_sp->pr_core = pr_core;
+
+ pr_core->prr_ids.max = max - 1;
+ xa_init_flags(&pr_core->prr_xa, XA_FLAGS_ALLOC);
+
+ devl_resource_occ_get_register(priv_to_devlink(core),
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
+ mlxsw_sp_port_range_reg_occ_get,
+ pr_core);
+
+ return 0;
+}
+
+void mlxsw_sp_port_range_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_port_range_core *pr_core = mlxsw_sp->pr_core;
+
+ devl_resource_occ_get_unregister(priv_to_devlink(mlxsw_sp->core),
+ MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS);
+ WARN_ON(!xa_empty(&pr_core->prr_xa));
+ xa_destroy(&pr_core->prr_xa);
+ kfree(pr_core);
+}
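The new spectrum_port_range.c above is a small get-or-create cache: identical {min, max, source} ranges share one hardware register, tracked with a refcount and allocated from an xarray whose IDs are capped at ACL_MAX_L4_PORT_RANGE. Below is a minimal, userspace-flavoured sketch of that sharing pattern; range_reg_get()/range_reg_put() and the fixed 16-slot table are illustrative assumptions, not driver API:

#include <stdio.h>

struct range {
	unsigned short min;
	unsigned short max;
	int source;
};

struct reg {
	struct range range;
	unsigned int refcount;
	int used;
};

static struct reg table[16];	/* stand-in for the per-ASIC register pool */

/* Reuse a slot whose range matches; otherwise allocate a free one. */
static int range_reg_get(const struct range *r)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (table[i].used &&
		    table[i].range.min == r->min &&
		    table[i].range.max == r->max &&
		    table[i].range.source == r->source) {
			table[i].refcount++;
			return i;
		}
	}
	for (i = 0; i < 16; i++) {
		if (!table[i].used) {
			table[i].range = *r;
			table[i].refcount = 1;
			table[i].used = 1;
			return i;
		}
	}
	return -1;	/* all registers in use */
}

static void range_reg_put(int i)
{
	if (i >= 0 && table[i].used && --table[i].refcount == 0)
		table[i].used = 0;
}

int main(void)
{
	struct range a = { .min = 1000, .max = 2000, .source = 1 };
	int i = range_reg_get(&a);
	int j = range_reg_get(&a);

	printf("same range shares one register: %d == %d\n", i, j);
	range_reg_put(j);
	range_reg_put(i);
	return 0;
}

The field-by-field match mirrors mlxsw_sp_port_range_reg_find(), and the put path releases the slot only when the last user drops its reference.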
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index b32adf277a22..debd2c466f11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -71,6 +71,7 @@ static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
struct mlxsw_sp_rif {
struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
+ netdevice_tracker dev_tracker;
struct list_head neigh_list;
struct mlxsw_sp_fid *fid;
unsigned char addr[ETH_ALEN];
@@ -139,6 +140,7 @@ struct mlxsw_sp_rif_ops {
struct netlink_ext_ack *extack);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack);
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
@@ -2871,6 +2873,21 @@ static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
return !!mlxsw_sp_port;
}
+static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router,
+ struct neighbour *n)
+{
+ struct net *net;
+
+ net = neigh_parms_net(n->parms);
+
+ /* Take a reference to ensure the neighbour won't be destructed until we
+ * drop the reference in delayed work.
+ */
+ neigh_clone(n);
+ return mlxsw_sp_router_schedule_work(net, router, n,
+ mlxsw_sp_router_neigh_event_work);
+}
+
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2878,7 +2895,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
- struct net *net;
router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
@@ -2902,7 +2918,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
break;
case NETEVENT_NEIGH_UPDATE:
n = ptr;
- net = neigh_parms_net(n->parms);
if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
return NOTIFY_DONE;
@@ -2910,13 +2925,7 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
if (!mlxsw_sp_dev_lower_is_port(n->dev))
return NOTIFY_DONE;
- /* Take a reference to ensure the neighbour won't be
- * destructed until we drop the reference in delayed
- * work.
- */
- neigh_clone(n);
- return mlxsw_sp_router_schedule_work(net, router, n,
- mlxsw_sp_router_neigh_event_work);
+ return mlxsw_sp_router_schedule_neigh_work(router, n);
case NETEVENT_IPV4_MPATH_HASH_UPDATE:
case NETEVENT_IPV6_MPATH_HASH_UPDATE:
@@ -2975,6 +2984,52 @@ static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
+struct mlxsw_sp_neigh_rif_made_sync {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err;
+};
+
+static void mlxsw_sp_neigh_rif_made_sync_each(struct neighbour *n, void *data)
+{
+ struct mlxsw_sp_neigh_rif_made_sync *rms = data;
+ int rc;
+
+ if (rms->err)
+ return;
+ if (n->dev != mlxsw_sp_rif_dev(rms->rif))
+ return;
+ rc = mlxsw_sp_router_schedule_neigh_work(rms->mlxsw_sp->router, n);
+ if (rc != NOTIFY_DONE)
+ rms->err = -ENOMEM;
+}
+
+static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_neigh_rif_made_sync rms = {
+ .mlxsw_sp = mlxsw_sp,
+ .rif = rif,
+ };
+
+ neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
+ if (rms.err)
+ goto err_arp;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ neigh_for_each(&nd_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
+#endif
+ if (rms.err)
+ goto err_nd;
+
+ return 0;
+
+err_nd:
+err_arp:
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+ return rms.err;
+}
+
enum mlxsw_sp_nexthop_type {
MLXSW_SP_NEXTHOP_TYPE_ETH,
MLXSW_SP_NEXTHOP_TYPE_IPIP,
@@ -4396,6 +4451,19 @@ err_neigh_init:
return err;
}
+static int mlxsw_sp_nexthop_type_rif_made(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ return mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ break;
+ }
+
+ return 0;
+}
+
static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
@@ -4524,6 +4592,35 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
}
+static int mlxsw_sp_nexthop_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_nexthop *nh, *tmp;
+ unsigned int n = 0;
+ int err;
+
+ list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
+ crif_list_node) {
+ err = mlxsw_sp_nexthop_type_rif_made(mlxsw_sp, nh);
+ if (err)
+ goto err_nexthop_type_rif;
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ n++;
+ }
+
+ return 0;
+
+err_nexthop_type_rif:
+ list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
+ crif_list_node) {
+ if (!n--)
+ break;
+ mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ }
+ return err;
+}
+
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -7451,6 +7548,7 @@ struct mlxsw_sp_fib6_event_work {
struct mlxsw_sp_fib_event_work {
struct work_struct work;
+ netdevice_tracker dev_tracker;
union {
struct mlxsw_sp_fib6_event_work fib6_work;
struct fib_entry_notifier_info fen_info;
@@ -7624,12 +7722,12 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
&fib_work->ven_info);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
- dev_put(fib_work->ven_info.dev);
+ netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
break;
case FIB_EVENT_VIF_DEL:
mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
&fib_work->ven_info);
- dev_put(fib_work->ven_info.dev);
+ netdev_put(fib_work->ven_info.dev, &fib_work->dev_tracker);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
@@ -7700,7 +7798,8 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
- dev_hold(fib_work->ven_info.dev);
+ netdev_hold(fib_work->ven_info.dev, &fib_work->dev_tracker,
+ GFP_ATOMIC);
break;
}
}
@@ -7884,6 +7983,26 @@ static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
+static int mlxsw_sp_router_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ int err;
+
+ err = mlxsw_sp_neigh_rif_made_sync(mlxsw_sp, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_nexthop_rif_made_sync(mlxsw_sp, rif);
+ if (err)
+ goto err_nexthop;
+
+ return 0;
+
+err_nexthop:
+ mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
+ return err;
+}
+
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -8190,6 +8309,7 @@ mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
struct mlxsw_sp_router_hwstats_notify_work {
struct work_struct work;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
};
static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
@@ -8201,7 +8321,7 @@ static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
rtnl_lock();
rtnl_offload_xstats_notify(hws_work->dev);
rtnl_unlock();
- dev_put(hws_work->dev);
+ netdev_put(hws_work->dev, &hws_work->dev_tracker);
kfree(hws_work);
}
@@ -8221,7 +8341,7 @@ mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
return;
INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
- dev_hold(dev);
+ netdev_hold(dev, &hws_work->dev_tracker, GFP_KERNEL);
hws_work->dev = dev;
mlxsw_core_schedule_work(&hws_work->work);
}
@@ -8293,14 +8413,14 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
err = -ENOMEM;
goto err_rif_alloc;
}
- dev_hold(params->dev);
+ netdev_hold(params->dev, &rif->dev_tracker, GFP_KERNEL);
mlxsw_sp->router->rifs[rif_index] = rif;
rif->mlxsw_sp = mlxsw_sp;
rif->ops = ops;
rif->rif_entries = rif_entries;
if (ops->fid_get) {
- fid = ops->fid_get(rif, extack);
+ fid = ops->fid_get(rif, params, extack);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto err_fid_get;
@@ -8321,6 +8441,10 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
+ err = mlxsw_sp_router_rif_made_sync(mlxsw_sp, rif);
+ if (err)
+ goto err_rif_made_sync;
+
if (netdev_offload_xstats_enabled(params->dev,
NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
err = mlxsw_sp_router_port_l3_stats_enable(rif);
@@ -8335,6 +8459,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
return rif;
err_stats_enable:
+ mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
+err_rif_made_sync:
err_mr_rif_add:
for (i--; i >= 0; i--)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
@@ -8344,7 +8470,7 @@ err_configure:
mlxsw_sp_fid_put(fid);
err_fid_get:
mlxsw_sp->router->rifs[rif_index] = NULL;
- dev_put(params->dev);
+ netdev_put(params->dev, &rif->dev_tracker);
mlxsw_sp_rif_free(rif);
err_rif_alloc:
err_crif_lookup:
@@ -8386,7 +8512,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
- dev_put(dev);
+ netdev_put(dev, &rif->dev_tracker);
mlxsw_sp_rif_free(rif);
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
vr->rif_count--;
@@ -8410,6 +8536,110 @@ out:
mutex_unlock(&mlxsw_sp->router->lock);
}
+static void mlxsw_sp_rif_destroy_vlan_upper(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ u16 vid)
+{
+ struct net_device *upper_dev;
+ struct mlxsw_sp_crif *crif;
+
+ rcu_read_lock();
+ upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q), vid);
+ rcu_read_unlock();
+
+ if (!upper_dev)
+ return;
+
+ crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, upper_dev);
+ if (!crif || !crif->rif)
+ return;
+
+ mlxsw_sp_rif_destroy(crif->rif);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ int lower_pvid,
+ unsigned long event,
+ struct netlink_ext_ack *extack);
+
+int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ u16 new_vid, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *old_rif;
+ struct mlxsw_sp_rif *new_rif;
+ struct net_device *upper_dev;
+ u16 old_pvid = 0;
+ u16 new_pvid;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ old_rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
+ if (old_rif) {
+ /* If the RIF on the bridge is not a VLAN RIF, we shouldn't have
+ * gotten a PVID notification.
+ */
+ if (WARN_ON(old_rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN))
+ old_rif = NULL;
+ else
+ old_pvid = mlxsw_sp_fid_8021q_vid(old_rif->fid);
+ }
+
+ if (is_pvid)
+ new_pvid = new_vid;
+ else if (old_pvid == new_vid)
+ new_pvid = 0;
+ else
+ goto out;
+
+ if (old_pvid == new_pvid)
+ goto out;
+
+ if (new_pvid) {
+ struct mlxsw_sp_rif_params params = {
+ .dev = br_dev,
+ .vid = new_pvid,
+ };
+
+ /* If there is a VLAN upper with the same VID as the new PVID,
+ * kill its RIF, if there is one.
+ */
+ mlxsw_sp_rif_destroy_vlan_upper(mlxsw_sp, br_dev, new_pvid);
+
+ if (mlxsw_sp_dev_addr_list_empty(br_dev))
+ goto out;
+ new_rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
+ if (IS_ERR(new_rif)) {
+ err = PTR_ERR(new_rif);
+ goto out;
+ }
+
+ if (old_pvid)
+ mlxsw_sp_rif_migrate_destroy(mlxsw_sp, old_rif, new_rif,
+ true);
+ } else {
+ mlxsw_sp_rif_destroy(old_rif);
+ }
+
+ if (old_pvid) {
+ rcu_read_lock();
+ upper_dev = __vlan_find_dev_deep_rcu(br_dev, htons(ETH_P_8021Q),
+ old_pvid);
+ rcu_read_unlock();
+ if (upper_dev)
+ err = mlxsw_sp_inetaddr_bridge_event(mlxsw_sp,
+ upper_dev,
+ new_pvid,
+ NETDEV_UP, extack);
+ }
+
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
+}
+
static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
@@ -8664,21 +8894,24 @@ __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_rif_params params = {
- .dev = l3_dev,
- };
+ struct mlxsw_sp_rif_params params;
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_fid *fid;
int err;
+ params = (struct mlxsw_sp_rif_params) {
+ .dev = l3_dev,
+ .vid = vid,
+ };
+
mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
/* FID was already created, just take a reference */
- fid = rif->ops->fid_get(rif, extack);
+ fid = rif->ops->fid_get(rif, &params, extack);
err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
if (err)
goto err_fid_port_vid_map;
@@ -8776,10 +9009,11 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
- if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ if (!nomaster && (netif_is_any_bridge_port(port_dev) ||
+ netif_is_lag_port(port_dev)))
return 0;
return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
@@ -8810,10 +9044,10 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
- if (netif_is_bridge_port(lag_dev))
+ if (!nomaster && netif_is_bridge_port(lag_dev))
return 0;
return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
@@ -8822,6 +9056,7 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
+ int lower_pvid,
unsigned long event,
struct netlink_ext_ack *extack)
{
@@ -8829,6 +9064,7 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
.dev = l3_dev,
};
struct mlxsw_sp_rif *rif;
+ int err;
switch (event) {
case NETDEV_UP:
@@ -8840,7 +9076,21 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
return -EOPNOTSUPP;
}
+ err = br_vlan_get_pvid(l3_dev, &params.vid);
+ if (err)
+ return err;
+ if (!params.vid)
+ return 0;
+ } else if (is_vlan_dev(l3_dev)) {
+ params.vid = vlan_dev_vlan_id(l3_dev);
+
+ /* If the VID matches the PVID of the bridge below, the
+ * bridge owns the RIF for this VLAN. Don't do anything.
+ */
+ if ((int)params.vid == lower_pvid)
+ return 0;
}
+
rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
@@ -8856,24 +9106,32 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *vlan_dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
+ u16 lower_pvid;
+ int err;
- if (netif_is_bridge_port(vlan_dev))
+ if (!nomaster && netif_is_bridge_port(vlan_dev))
return 0;
- if (mlxsw_sp_port_dev_check(real_dev))
+ if (mlxsw_sp_port_dev_check(real_dev)) {
return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
event, vid, extack);
- else if (netif_is_lag_master(real_dev))
+ } else if (netif_is_lag_master(real_dev)) {
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid, extack);
- else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
- return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
+ } else if (netif_is_bridge_master(real_dev) &&
+ br_vlan_enabled(real_dev)) {
+ err = br_vlan_get_pvid(real_dev, &lower_pvid);
+ if (err)
+ return err;
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev,
+ lower_pvid, event,
extack);
+ }
return 0;
}
@@ -8927,10 +9185,8 @@ static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
- if (!rif) {
- NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
- return -EOPNOTSUPP;
- }
+ if (!rif)
+ return 0;
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
@@ -9000,19 +9256,21 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
- unsigned long event,
+ unsigned long event, bool nomaster,
struct netlink_ext_ack *extack)
{
if (mlxsw_sp_port_dev_check(dev))
- return mlxsw_sp_inetaddr_port_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_port_event(dev, event, nomaster,
+ extack);
else if (netif_is_lag_master(dev))
- return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_lag_event(dev, event, nomaster,
+ extack);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, -1, event,
extack);
else if (is_vlan_dev(dev))
return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
- extack);
+ nomaster, extack);
else if (netif_is_macvlan(dev))
return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
extack);
@@ -9039,7 +9297,8 @@ static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
+ err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, false,
+ NULL);
out:
mutex_unlock(&router->lock);
return notifier_from_errno(err);
@@ -9063,7 +9322,8 @@ static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
+ ivi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
@@ -9073,6 +9333,7 @@ struct mlxsw_sp_inet6addr_event_work {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
unsigned long event;
};
@@ -9092,11 +9353,11 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false, NULL);
out:
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
- dev_put(dev);
+ netdev_put(dev, &inet6addr_work->dev_tracker);
kfree(inet6addr_work);
}
@@ -9122,7 +9383,7 @@ static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
inet6addr_work->mlxsw_sp = router->mlxsw_sp;
inet6addr_work->dev = dev;
inet6addr_work->event = event;
- dev_hold(dev);
+ netdev_hold(dev, &inet6addr_work->dev_tracker, GFP_ATOMIC);
mlxsw_core_schedule_work(&inet6addr_work->work);
return NOTIFY_DONE;
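The inet6addr work item above moves from plain dev_hold()/dev_put() to the tracked netdev_hold()/netdev_put() helpers. As a minimal sketch of that pattern, assuming a deferred-work structure similar to the one in the patch ("my_work" and the helper names are placeholders, not mlxsw code):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_work {
	struct work_struct work;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void my_work_hold_dev(struct my_work *w, struct net_device *dev)
{
	w->dev = dev;
	/* Take a reference and register it with the tracker, so a leaked
	 * reference can be attributed when CONFIG_NET_DEV_REFCNT_TRACKER=y.
	 */
	netdev_hold(dev, &w->dev_tracker, GFP_ATOMIC);
}

static void my_work_put_dev(struct my_work *w)
{
	/* Release through the same tracker that took the reference. */
	netdev_put(w->dev, &w->dev_tracker);
}

The conversion is behaviorally equivalent to dev_hold()/dev_put(); the tracker only adds reference bookkeeping.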
@@ -9146,7 +9407,8 @@ static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, false,
+ i6vi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
@@ -9466,10 +9728,11 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
*/
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (rif)
- __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false,
extack);
- return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
+ return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, false,
+ extack);
}
static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
@@ -9480,7 +9743,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif)
return;
- __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, false, NULL);
}
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
@@ -9523,6 +9786,116 @@ mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+struct mlxsw_sp_router_replay_inetaddr_up {
+ struct mlxsw_sp *mlxsw_sp;
+ struct netlink_ext_ack *extack;
+ unsigned int done;
+ bool deslavement;
+};
+
+static int mlxsw_sp_router_replay_inetaddr_up(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
+ bool nomaster = ctx->deslavement;
+ struct mlxsw_sp_crif *crif;
+ int err;
+
+ if (mlxsw_sp_dev_addr_list_empty(dev))
+ return 0;
+
+ crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
+ if (!crif || crif->rif)
+ return 0;
+
+ if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
+ return 0;
+
+ err = __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_UP,
+ nomaster, ctx->extack);
+ if (err)
+ return err;
+
+ ctx->done++;
+ return 0;
+}
+
+static int mlxsw_sp_router_unreplay_inetaddr_up(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up *ctx = priv->data;
+ bool nomaster = ctx->deslavement;
+ struct mlxsw_sp_crif *crif;
+
+ if (!ctx->done)
+ return 0;
+
+ if (mlxsw_sp_dev_addr_list_empty(dev))
+ return 0;
+
+ crif = mlxsw_sp_crif_lookup(ctx->mlxsw_sp->router, dev);
+ if (!crif || !crif->rif)
+ return 0;
+
+ /* We are rolling back NETDEV_UP, so ask for that. */
+ if (!mlxsw_sp_rif_should_config(crif->rif, dev, NETDEV_UP))
+ return 0;
+
+ __mlxsw_sp_inetaddr_event(ctx->mlxsw_sp, dev, NETDEV_DOWN, nomaster,
+ NULL);
+
+ ctx->done--;
+ return 0;
+}
+
+int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up ctx = {
+ .mlxsw_sp = mlxsw_sp,
+ .extack = extack,
+ .deslavement = false,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &ctx,
+ };
+ int err;
+
+ err = mlxsw_sp_router_replay_inetaddr_up(upper_dev, &priv);
+ if (err)
+ return err;
+
+ err = netdev_walk_all_upper_dev_rcu(upper_dev,
+ mlxsw_sp_router_replay_inetaddr_up,
+ &priv);
+ if (err)
+ goto err_replay_up;
+
+ return 0;
+
+err_replay_up:
+ netdev_walk_all_upper_dev_rcu(upper_dev,
+ mlxsw_sp_router_unreplay_inetaddr_up,
+ &priv);
+ mlxsw_sp_router_unreplay_inetaddr_up(upper_dev, &priv);
+ return err;
+}
+
+void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_router_replay_inetaddr_up ctx = {
+ .mlxsw_sp = mlxsw_sp,
+ .deslavement = true,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &ctx,
+ };
+
+ mlxsw_sp_router_replay_inetaddr_up(dev, &priv);
+}
+
static int
mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid, struct net_device *dev,
@@ -9539,15 +9912,84 @@ mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
dev, extack);
}
+static void
+mlxsw_sp_port_vid_router_leave(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
+ vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return;
+
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+}
+
static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev,
struct netlink_ext_ack *extack)
{
u16 default_vid = MLXSW_SP_DEFAULT_VID;
+ struct net_device *upper_dev;
+ struct list_head *iter;
+ int done = 0;
+ u16 vid;
+ int err;
- return mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port,
- default_vid, lag_dev,
- extack);
+ err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, default_vid,
+ lag_dev, extack);
+ if (err)
+ return err;
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ err = mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port, vid,
+ upper_dev, extack);
+ if (err)
+ goto err_router_join_dev;
+
+ ++done;
+ }
+
+ return 0;
+
+err_router_join_dev:
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+ if (!done--)
+ break;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
+ }
+
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
+ return err;
+}
+
+static void
+__mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ u16 default_vid = MLXSW_SP_DEFAULT_VID;
+ struct net_device *upper_dev;
+ struct list_head *iter;
+ u16 vid;
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, vid, upper_dev);
+ }
+
+ mlxsw_sp_port_vid_router_leave(mlxsw_sp_port, default_vid, lag_dev);
}
int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -9563,6 +10005,14 @@ int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
+void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
+ __mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
+ mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
+}
+
static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -9608,6 +10058,40 @@ out:
return notifier_from_errno(err);
}
+struct mlxsw_sp_macvlan_replay {
+ struct mlxsw_sp *mlxsw_sp;
+ struct netlink_ext_ack *extack;
+};
+
+static int mlxsw_sp_macvlan_replay_upper(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ const struct mlxsw_sp_macvlan_replay *rms = priv->data;
+ struct netlink_ext_ack *extack = rms->extack;
+ struct mlxsw_sp *mlxsw_sp = rms->mlxsw_sp;
+
+ if (!netif_is_macvlan(dev))
+ return 0;
+
+ return mlxsw_sp_rif_macvlan_add(mlxsw_sp, dev, extack);
+}
+
+static int mlxsw_sp_macvlan_replay(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_macvlan_replay rms = {
+ .mlxsw_sp = rif->mlxsw_sp,
+ .extack = extack,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &rms,
+ };
+
+ return netdev_walk_all_upper_dev_rcu(mlxsw_sp_rif_dev(rif),
+ mlxsw_sp_macvlan_replay_upper,
+ &priv);
+}
+
static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
struct netdev_nested_priv *priv)
{
@@ -9630,7 +10114,6 @@ static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
if (!netif_is_macvlan_port(dev))
return 0;
- netdev_warn(dev, "Router interface is deleted. Upper macvlans will not work\n");
return netdev_walk_all_upper_dev_rcu(dev,
__mlxsw_sp_rif_macvlan_flush, &priv);
}
@@ -9688,6 +10171,10 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_subport_op;
+ err = mlxsw_sp_macvlan_replay(rif, extack);
+ if (err)
+ goto err_macvlan_replay;
+
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
@@ -9703,6 +10190,8 @@ err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
+ mlxsw_sp_rif_macvlan_flush(rif);
+err_macvlan_replay:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
@@ -9724,6 +10213,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack)
{
return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
@@ -9788,6 +10278,10 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_fid_bc_flood_set;
+ err = mlxsw_sp_macvlan_replay(rif, extack);
+ if (err)
+ goto err_macvlan_replay;
+
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
@@ -9803,6 +10297,8 @@ err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
+ mlxsw_sp_rif_macvlan_flush(rif);
+err_macvlan_replay:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
@@ -9836,6 +10332,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack)
{
int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
@@ -9869,27 +10366,22 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
+ const struct mlxsw_sp_rif_params *params,
struct netlink_ext_ack *extack)
{
struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct net_device *br_dev;
- u16 vid;
- int err;
+
+ if (WARN_ON(!params->vid))
+ return ERR_PTR(-EINVAL);
if (is_vlan_dev(dev)) {
- vid = vlan_dev_vlan_id(dev);
br_dev = vlan_dev_real_dev(dev);
if (WARN_ON(!netif_is_bridge_master(br_dev)))
return ERR_PTR(-EINVAL);
- } else {
- err = br_vlan_get_pvid(dev, &vid);
- if (err < 0 || !vid) {
- NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
- return ERR_PTR(-EINVAL);
- }
}
- return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, params->vid);
}
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -9954,6 +10446,10 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
if (err)
goto err_fid_bc_flood_set;
+ err = mlxsw_sp_macvlan_replay(rif, extack);
+ if (err)
+ goto err_macvlan_replay;
+
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
@@ -9969,6 +10465,8 @@ err_fid_rif_set:
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
+ mlxsw_sp_rif_macvlan_flush(rif);
+err_macvlan_replay:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 9a2669a08480..ed3b628caafe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -171,8 +171,19 @@ int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
struct net_device *
mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
+int mlxsw_sp_router_bridge_vlan_add(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ u16 new_vid, bool is_pvid,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev,
struct netlink_ext_ack *extack);
+void mlxsw_sp_router_port_leave_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev);
+int mlxsw_sp_netdevice_enslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *upper_dev,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_netdevice_deslavement_replay(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev);
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 82e711afb02b..c59b5f11f357 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -93,13 +93,8 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev);
-
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry);
-
-int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
-void mlxsw_sp_span_speed_update_work(struct work_struct *work);
-
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
const struct mlxsw_sp_span_agent_parms *parms);
void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index d88e62bc759f..6c749c148148 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -384,6 +384,91 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}
+static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
+static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj);
+
+struct mlxsw_sp_bridge_port_replay_switchdev_objs {
+ struct net_device *brport_dev;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int done;
+};
+
+static int
+mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
+ struct netlink_ext_ack *extack = port_obj_info->info.extack;
+ struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
+ int err = 0;
+
+ rso = (void *)port_obj_info->info.ctx;
+
+ if (event != SWITCHDEV_PORT_OBJ_ADD ||
+ dev != rso->brport_dev)
+ goto out;
+
+ /* When a port is joining the bridge through a LAG, there likely are
+ * VLANs configured on that LAG already. The replay will thus attempt to
+ * have the given port-vlans join the corresponding FIDs. But the LAG
+ * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN
+ * memberships, back before CHANGEUPPER was distributed and netdevice
+ * master set. So now before propagating the VLAN events further, we
+ * first need to kill the corresponding VID at the mlxsw_sp_port.
+ *
+ * Note that this doesn't need to be rolled back on failure -- if the
+ * replay fails, the enslavement is off, and the VIDs would be killed by
+ * LAG anyway as part of its rollback.
+ */
+ if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) {
+ u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid;
+
+ err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid);
+ if (err)
+ goto out;
+ }
+
+ ++rso->done;
+ err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL,
+ port_obj_info->obj, extack);
+
+out:
+ return notifier_from_errno(err);
+}
+
+static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = {
+ .notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs,
+};
+
+static int
+mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
+ struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
+
+ rso = (void *)port_obj_info->info.ctx;
+
+ if (event != SWITCHDEV_PORT_OBJ_ADD ||
+ dev != rso->brport_dev)
+ return NOTIFY_DONE;
+ if (!rso->done--)
+ return NOTIFY_STOP;
+
+ mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL,
+ port_obj_info->obj);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = {
+ .notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs,
+};
+
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
struct net_device *brport_dev,
@@ -405,7 +490,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
bridge_port->system_port = mlxsw_sp_port->local_port;
bridge_port->dev = brport_dev;
bridge_port->bridge_device = bridge_device;
- bridge_port->stp_state = BR_STATE_DISABLED;
+ bridge_port->stp_state = br_port_get_stp_state(brport_dev);
bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
@@ -1479,29 +1564,15 @@ err_port_vlan_set:
}
static int
-mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct switchdev_obj_port_vlan *vlan)
+mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
- u16 pvid;
-
- pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
- if (!pvid)
- return 0;
-
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- if (vlan->vid != pvid) {
- netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
- return -EBUSY;
- }
- } else {
- if (vlan->vid == pvid) {
- netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
- return -EBUSY;
- }
- }
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- return 0;
+ return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid,
+ flag_pvid, extack);
}
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1518,8 +1589,8 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
int err = 0;
if (br_vlan_enabled(orig_dev))
- err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
- orig_dev, vlan);
+ err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev,
+ vlan, extack);
if (!err)
err = -EOPNOTSUPP;
return err;
@@ -2365,6 +2436,33 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = {
+ .brport_dev = bridge_port->dev,
+ .mlxsw_sp_port = mlxsw_sp_port,
+ };
+ struct notifier_block *nb;
+ int err;
+
+ nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb;
+ err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
+ &rso, NULL, nb, extack);
+ if (err)
+ goto err_replay;
+
+ return 0;
+
+err_replay:
+ nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb;
+ switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
+ &rso, NULL, nb, extack);
+ return err;
+}
+
+static int
mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
@@ -2378,7 +2476,7 @@ mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
if (mlxsw_sp_port->default_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
- return 0;
+ return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
}
static int
@@ -2550,6 +2648,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct net_device *dev = bridge_port->dev;
u16 vid;
+ int err;
vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -2565,8 +2664,20 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
if (mlxsw_sp_port_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
- return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
- extack);
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
+ if (err)
+ goto err_replay;
+
+ return 0;
+
+err_replay:
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ return err;
}
static void
@@ -2783,8 +2894,15 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_port_join;
+ err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack);
+ if (err)
+ goto err_replay;
+
return 0;
+err_replay:
+ bridge_device->ops->port_leave(bridge_device, bridge_port,
+ mlxsw_sp_port);
err_port_join:
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
return err;
@@ -2948,9 +3066,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
- goto just_remove;
-
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
@@ -3018,9 +3133,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
- goto just_remove;
-
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
@@ -3262,6 +3374,7 @@ out:
struct mlxsw_sp_switchdev_event_work {
struct work_struct work;
+ netdevice_tracker dev_tracker;
union {
struct switchdev_notifier_fdb_info fdb_info;
struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
@@ -3418,8 +3531,8 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
out:
rtnl_unlock();
kfree(switchdev_work->fdb_info.addr);
+ netdev_put(dev, &switchdev_work->dev_tracker);
kfree(switchdev_work);
- dev_put(dev);
}
static void
@@ -3430,7 +3543,6 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = switchdev_work->dev;
- u8 all_zeros_mac[ETH_ALEN] = { 0 };
enum mlxsw_sp_l3proto proto;
union mlxsw_sp_l3addr addr;
struct net_device *br_dev;
@@ -3452,7 +3564,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
&proto, &addr);
- if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
if (err) {
mlxsw_sp_fid_put(fid);
@@ -3504,7 +3616,6 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = switchdev_work->dev;
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
- u8 all_zeros_mac[ETH_ALEN] = { 0 };
enum mlxsw_sp_l3proto proto;
union mlxsw_sp_l3addr addr;
struct mlxsw_sp_fid *fid;
@@ -3525,7 +3636,7 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
&proto, &addr);
- if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
mlxsw_sp_fid_put(fid);
return;
@@ -3574,8 +3685,8 @@ static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
out:
rtnl_unlock();
+ netdev_put(dev, &switchdev_work->dev_tracker);
kfree(switchdev_work);
- dev_put(dev);
}
static int
@@ -3675,7 +3786,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
* upper device containing mlxsw_sp_port or just a
* mlxsw_sp_port
*/
- dev_hold(dev);
+ netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
break;
case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
@@ -3685,7 +3796,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
info);
if (err)
goto err_vxlan_work_prepare;
- dev_hold(dev);
+ netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
break;
default:
kfree(switchdev_work);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a36f6369f132..c81cdeb4d4e7 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1515,7 +1515,7 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
&fphy_status, NULL);
if (IS_ERR(phydev)) {
netdev_err(netdev, "No PHY/fixed_PHY found\n");
- return -EIO;
+ return PTR_ERR(phydev);
}
} else {
goto return_error;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index bd72fbc2220f..3960534ac2ad 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -2,6 +2,7 @@
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <net/page_pool/helpers.h>
#include "lan966x_main.h"
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index fbb0bb4594cd..0d6e79af2410 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -5,9 +5,10 @@
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/ip.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <net/addrconf.h>
@@ -449,39 +450,46 @@ static int lan966x_port_get_parent_id(struct net_device *dev,
return 0;
}
-static int lan966x_port_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
+static int lan966x_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (!port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
+ lan966x_ptp_hwtstamp_get(port, cfg);
+
+ return 0;
+}
+
+static int lan966x_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct lan966x_port *port = netdev_priv(dev);
int err;
- if (cmd == SIOCSHWTSTAMP) {
- err = lan966x_ptp_setup_traps(port, ifr);
- if (err)
- return err;
- }
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV &&
+ cfg->source != HWTSTAMP_SOURCE_PHYLIB)
+ return -EOPNOTSUPP;
- if (!phy_has_hwtstamp(dev->phydev) && port->lan966x->ptp) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- err = lan966x_ptp_hwtstamp_set(port, ifr);
- if (err)
- lan966x_ptp_del_traps(port);
+ err = lan966x_ptp_setup_traps(port, cfg);
+ if (err)
+ return err;
+ if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
+ if (!port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
+ err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
+ if (err) {
+ lan966x_ptp_del_traps(port);
return err;
- case SIOCGHWTSTAMP:
- return lan966x_ptp_hwtstamp_get(port, ifr);
}
}
- if (!dev->phydev)
- return -ENODEV;
-
- err = phy_mii_ioctl(dev->phydev, ifr, cmd);
- if (err && cmd == SIOCSHWTSTAMP)
- lan966x_ptp_del_traps(port);
-
- return err;
+ return 0;
}
static const struct net_device_ops lan966x_port_netdev_ops = {
@@ -494,10 +502,12 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_get_stats64 = lan966x_stats_get,
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
- .ndo_eth_ioctl = lan966x_port_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_setup_tc = lan966x_tc_setup,
.ndo_bpf = lan966x_xdp,
.ndo_xdp_xmit = lan966x_xdp_xmit,
+ .ndo_hwtstamp_get = lan966x_port_hwtstamp_get,
+ .ndo_hwtstamp_set = lan966x_port_hwtstamp_set,
};
bool lan966x_netdevice_check(const struct net_device *dev)
@@ -807,6 +817,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC;
+ dev->priv_flags |= IFF_SEE_ALL_HWTSTAMP_REQUESTS;
dev->needed_headroom = IFH_LEN_BYTES;
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
@@ -1108,8 +1119,8 @@ static int lan966x_probe(struct platform_device *pdev)
/* set irq */
lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
- if (lan966x->xtr_irq <= 0)
- return -EINVAL;
+ if (lan966x->xtr_irq < 0)
+ return lan966x->xtr_irq;
err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
lan966x_xtr_irq_handler, IRQF_ONESHOT,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 27f272831ea5..caa9e0533c96 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -10,10 +10,11 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/switchdev.h>
+#include <net/xdp.h>
#include <vcap_api.h>
#include <vcap_api_client.h>
@@ -298,7 +299,7 @@ struct lan966x_phc {
struct ptp_clock *clock;
struct ptp_clock_info info;
struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM];
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct lan966x *lan966x;
u8 index;
};
@@ -578,8 +579,11 @@ void lan966x_mdb_restore_entries(struct lan966x *lan966x);
int lan966x_ptp_init(struct lan966x *lan966x);
void lan966x_ptp_deinit(struct lan966x *lan966x);
-int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr);
-int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr);
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg);
void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u64 src_port, u64 timestamp);
int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
@@ -590,7 +594,8 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
u32 lan966x_ptp_get_period_ps(void);
int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
-int lan966x_ptp_setup_traps(struct lan966x_port *port, struct ifreq *ifr);
+int lan966x_ptp_setup_traps(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg);
int lan966x_ptp_del_traps(struct lan966x_port *port);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 266a21a2d124..63905bb5a63a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -59,7 +59,7 @@ static int lan966x_ptp_add_trap(struct lan966x_port *port,
int err;
vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
- if (vrule) {
+ if (!IS_ERR(vrule)) {
u32 value, mask;
/* Just modify the ingress port mask and exit */
@@ -106,7 +106,7 @@ static int lan966x_ptp_del_trap(struct lan966x_port *port,
int err;
vrule = vcap_get_rule(lan966x->vcap_ctrl, rule_id);
- if (!vrule)
+ if (IS_ERR(vrule))
return -EEXIST;
vcap_rule_get_key_u32(vrule, VCAP_KF_IF_IGR_PORT_MASK, &value, &mask);
@@ -248,29 +248,23 @@ int lan966x_ptp_del_traps(struct lan966x_port *port)
return err;
}
-int lan966x_ptp_setup_traps(struct lan966x_port *port, struct ifreq *ifr)
+int lan966x_ptp_setup_traps(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg)
{
- struct hwtstamp_config cfg;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- if (cfg.rx_filter == HWTSTAMP_FILTER_NONE)
+ if (cfg->rx_filter == HWTSTAMP_FILTER_NONE)
return lan966x_ptp_del_traps(port);
else
return lan966x_ptp_add_traps(port);
}
-int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
+int lan966x_ptp_hwtstamp_set(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = port->lan966x;
- struct hwtstamp_config cfg;
struct lan966x_phc *phc;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- switch (cfg.tx_type) {
+ switch (cfg->tx_type) {
case HWTSTAMP_TX_ON:
port->ptp_tx_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
@@ -284,7 +278,7 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
return -ERANGE;
}
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->ptp_rx_cmd = false;
break;
@@ -303,7 +297,7 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
port->ptp_rx_cmd = true;
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
@@ -312,20 +306,20 @@ int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
/* Commit back the result & save it */
mutex_lock(&lan966x->ptp_lock);
phc = &lan966x->phc[LAN966X_PHC_PORT];
- memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ phc->hwtstamp_config = *cfg;
mutex_unlock(&lan966x->ptp_lock);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-int lan966x_ptp_hwtstamp_get(struct lan966x_port *port, struct ifreq *ifr)
+void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
+ struct kernel_hwtstamp_config *cfg)
{
struct lan966x *lan966x = port->lan966x;
struct lan966x_phc *phc;
phc = &lan966x->phc[LAN966X_PHC_PORT];
- return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
- sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+ *cfg = phc->hwtstamp_config;
}
static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
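The lan966x changes above replace the SIOCSHWTSTAMP/SIOCGHWTSTAMP branches of ndo_eth_ioctl with the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, which take a kernel-side struct kernel_hwtstamp_config instead of a user-space ifreq. A minimal sketch of that callback shape, assuming a hypothetical driver that only stores the accepted configuration (all "foo_*" names are placeholders, not lan966x code):

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

struct foo_port {
	struct kernel_hwtstamp_config hwtstamp_config;
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg)
{
	struct foo_port *port = netdev_priv(dev);

	/* Report the last accepted configuration; the core copies it back
	 * to user space, so no copy_to_user() is needed here.
	 */
	*cfg = port->hwtstamp_config;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct foo_port *port = netdev_priv(dev);

	switch (cfg->tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* Program the hardware here, then remember what was accepted. */
	port->hwtstamp_config = *cfg;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get = foo_hwtstamp_get,
	.ndo_hwtstamp_set = foo_hwtstamp_set,
};

Because the core now owns the user-space copies, lan966x_ptp_hwtstamp_get() above can become void and lan966x_ptp_hwtstamp_set() simply returns 0 on success.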
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index 96b3def6c474..d696cf9dbd19 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -75,7 +75,7 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
return err;
@@ -172,7 +172,7 @@ lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
}
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 62c85463b634..6f565c0c0c3d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -205,7 +205,7 @@ enum sparx5_core_clockfreq {
struct sparx5_phc {
struct ptp_clock *clock;
struct ptp_clock_info info;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct sparx5 *sparx5;
u8 index;
};
@@ -388,8 +388,11 @@ void sparx5_unregister_netdevs(struct sparx5 *sparx5);
/* sparx5_ptp.c */
int sparx5_ptp_init(struct sparx5 *sparx5);
void sparx5_ptp_deinit(struct sparx5 *sparx5);
-int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr);
-int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr);
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+void sparx5_ptp_hwtstamp_get(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg);
void sparx5_ptp_rxtstamp(struct sparx5 *sparx5, struct sk_buff *skb,
u64 timestamp);
int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
@@ -411,7 +414,6 @@ enum sparx5_pgid_type {
};
void sparx5_pgid_init(struct sparx5 *spx5);
-int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index d078156581d5..705a004b324f 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -210,22 +210,31 @@ static int sparx5_get_port_parent_id(struct net_device *dev,
return 0;
}
-static int sparx5_port_ioctl(struct net_device *dev, struct ifreq *ifr,
- int cmd)
+static int sparx5_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
struct sparx5_port *sparx5_port = netdev_priv(dev);
struct sparx5 *sparx5 = sparx5_port->sparx5;
- if (!phy_has_hwtstamp(dev->phydev) && sparx5->ptp) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return sparx5_ptp_hwtstamp_set(sparx5_port, ifr);
- case SIOCGHWTSTAMP:
- return sparx5_ptp_hwtstamp_get(sparx5_port, ifr);
- }
- }
+ if (!sparx5->ptp)
+ return -EOPNOTSUPP;
+
+ sparx5_ptp_hwtstamp_get(sparx5_port, cfg);
+
+ return 0;
+}
+
+static int sparx5_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct sparx5_port *sparx5_port = netdev_priv(dev);
+ struct sparx5 *sparx5 = sparx5_port->sparx5;
+
+ if (!sparx5->ptp)
+ return -EOPNOTSUPP;
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return sparx5_ptp_hwtstamp_set(sparx5_port, cfg, extack);
}
static const struct net_device_ops sparx5_port_netdev_ops = {
@@ -238,8 +247,10 @@ static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
- .ndo_eth_ioctl = sparx5_port_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_setup_tc = sparx5_port_setup_tc,
+ .ndo_hwtstamp_get = sparx5_port_hwtstamp_get,
+ .ndo_hwtstamp_set = sparx5_port_hwtstamp_set,
};
bool sparx5_netdevice_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 0edb98cef7e4..5a932460db58 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -74,10 +74,11 @@ static u64 sparx5_ptp_get_nominal_value(struct sparx5 *sparx5)
return res;
}
-int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
+int sparx5_ptp_hwtstamp_set(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct sparx5 *sparx5 = port->sparx5;
- struct hwtstamp_config cfg;
struct sparx5_phc *phc;
/* For now don't allow to run ptp on ports that are part of a bridge,
@@ -88,10 +89,7 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
if (test_bit(port->portno, sparx5->bridge_mask))
return -EINVAL;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- switch (cfg.tx_type) {
+ switch (cfg->tx_type) {
case HWTSTAMP_TX_ON:
port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
@@ -105,7 +103,7 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
return -ERANGE;
}
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
@@ -122,7 +120,7 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ cfg->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
@@ -131,20 +129,20 @@ int sparx5_ptp_hwtstamp_set(struct sparx5_port *port, struct ifreq *ifr)
/* Commit back the result & save it */
mutex_lock(&sparx5->ptp_lock);
phc = &sparx5->phc[SPARX5_PHC_PORT];
- memcpy(&phc->hwtstamp_config, &cfg, sizeof(cfg));
+ phc->hwtstamp_config = *cfg;
mutex_unlock(&sparx5->ptp_lock);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-int sparx5_ptp_hwtstamp_get(struct sparx5_port *port, struct ifreq *ifr)
+void sparx5_ptp_hwtstamp_get(struct sparx5_port *port,
+ struct kernel_hwtstamp_config *cfg)
{
struct sparx5 *sparx5 = port->sparx5;
struct sparx5_phc *phc;
phc = &sparx5->phc[SPARX5_PHC_PORT];
- return copy_to_user(ifr->ifr_data, &phc->hwtstamp_config,
- sizeof(phc->hwtstamp_config)) ? -EFAULT : 0;
+ *cfg = phc->hwtstamp_config;
}
static void sparx5_ptp_classify(struct sparx5_port *port, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 3f87a5285a6d..523e0c470894 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -126,7 +126,7 @@ sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
}
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);
return err;
@@ -175,7 +175,7 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
return err;
@@ -1274,7 +1274,7 @@ static int sparx5_tc_free_rule_resources(struct net_device *ndev,
int ret = 0;
vrule = vcap_get_rule(vctrl, rule_id);
- if (!vrule || IS_ERR(vrule))
+ if (IS_ERR(vrule))
return -EINVAL;
sparx5_tc_free_psfp_resources(sparx5, vrule);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index a418ad8e8770..300fe1a93dce 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -2396,7 +2396,7 @@ struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem)
ri = vcap_dup_rule(elem, elem->state == VCAP_RS_DISABLED);
if (IS_ERR(ri))
- return ERR_PTR(PTR_ERR(ri));
+ return ERR_CAST(ri);
if (ri->state == VCAP_RS_DISABLED)
goto out;
@@ -2429,7 +2429,7 @@ struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id)
elem = vcap_get_locked_rule(vctrl, id);
if (!elem)
- return NULL;
+ return ERR_PTR(-ENOENT);
rule = vcap_decode_rule(elem);
mutex_unlock(&elem->admin->lock);
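With vcap_get_rule() now returning ERR_PTR() codes instead of NULL on failure, callers switch from NULL checks to IS_ERR(), as the lan966x_ptp and sparx5_tc hunks above already do. A hedged caller-side sketch (the function name is hypothetical):

static int example_inspect_rule(struct vcap_control *vctrl, u32 rule_id)
{
	struct vcap_rule *vrule;

	vrule = vcap_get_rule(vctrl, rule_id);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule); /* e.g. -ENOENT when no rule has this id */

	/* ... inspect or modify the decoded copy ... */

	vcap_free_rule(vrule);
	return 0;
}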
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
index 62db270f65af..9eccfa633c1a 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -277,7 +277,4 @@ struct vcap_control {
struct list_head list; /* list of vcap instances */
};
-/* Set client control interface on the API */
-int vcap_api_set_client(struct vcap_control *vctrl);
-
#endif /* __VCAP_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index d9d1f7c9d762..88641508f885 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -226,9 +226,6 @@ int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid);
bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
/* Is this chain id the last lookup of all VCAPs */
bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress);
-/* Provide all rules via a callback interface */
-int vcap_rule_iter(struct vcap_control *vctrl,
- int (*callback)(void *, struct vcap_rule *), void *arg);
/* Match a list of keys against the keysets available in a vcap type */
bool vcap_rule_find_keysets(struct vcap_rule *rule,
struct vcap_keyset_list *matches);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.c b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
index 09abe7944af6..27e2dffb65e6 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_tc.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
@@ -50,7 +50,7 @@ int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS);
return err;
@@ -86,7 +86,7 @@ int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st)
}
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
return err;
@@ -124,7 +124,7 @@ int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
return err;
out:
NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
@@ -158,7 +158,7 @@ int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_PORTS);
return err;
@@ -201,7 +201,7 @@ int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);
return 0;
out:
@@ -238,7 +238,7 @@ int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
if (mt.mask->vlan_tpid)
st->tpid = be16_to_cpu(mt.key->vlan_tpid);
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);
return 0;
out:
@@ -313,7 +313,7 @@ int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_TCP);
return err;
@@ -376,7 +376,7 @@ int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ARP);
return 0;
@@ -401,7 +401,7 @@ int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IP);
return err;
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.h b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
index 071f892f9aa4..49b02d032906 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_tc.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
@@ -14,7 +14,7 @@ struct vcap_tc_flower_parse_usage {
u16 l3_proto;
u8 l4_proto;
u16 tpid;
- unsigned int used_keys;
+ unsigned long long used_keys;
};
int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st);
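The BIT() to BIT_ULL() conversions above follow from widening used_keys to unsigned long long: once the flow dissector key enumeration grows past 31, a 32-bit BIT() mask can no longer represent every key. A small illustrative helper (name and usage are hypothetical):

#include <linux/bits.h>
#include <net/flow_dissector.h>

static bool example_key_seen(unsigned long long used_keys,
			     enum flow_dissector_key_id key)
{
	/* BIT_ULL() stays well defined for key values of 32 and above. */
	return used_keys & BIT_ULL(key);
}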
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 8f3f78b68592..6367de0c2c2e 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -106,6 +106,25 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
return 0;
}
+static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_query_hwc_timeout_resp resp = {};
+ struct gdma_query_hwc_timeout_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
+ sizeof(req), sizeof(resp));
+ req.timeout_ms = *timeout_val;
+ err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+ if (err || resp.hdr.status)
+ return err ? err : -EPROTO;
+
+ *timeout_val = resp.timeout_ms;
+
+ return 0;
+}
+
static int mana_gd_detect_devices(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -300,8 +319,11 @@ static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
+ /* The hardware spec requires the software client to set wqe_cnt to 0
+ * for receive queues. The value is not used for send queues.
+ */
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
- queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
+ queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
@@ -879,8 +901,10 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_verify_ver_resp resp = {};
struct gdma_verify_ver_req req = {};
+ struct hw_channel_context *hwc;
int err;
+ hwc = gc->hwc.driver_data;
mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
sizeof(req), sizeof(resp));
@@ -907,7 +931,14 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
-
+ if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
+ err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
+ if (err) {
+ dev_err(gc->dev, "Failed to set the hwc timeout %d\n", err);
+ return err;
+ }
+ dev_dbg(gc->dev, "set the hwc timeout to %u\n", hwc->hwc_timeout);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 2bd1d74021f7..9d1cd3bfcf66 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -174,7 +174,25 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
complete(&hwc->hwc_init_eqe_comp);
break;
+ case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
+ type_data.as_uint32 = event->details[0];
+ type = type_data.type;
+ val = type_data.value;
+
+ switch (type) {
+ case HWC_DATA_CFG_HWC_TIMEOUT:
+ hwc->hwc_timeout = val;
+ break;
+
+ default:
+ dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
+ break;
+ }
+
+ break;
+
default:
+ dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
break;
}
@@ -696,6 +714,7 @@ int mana_hwc_create_channel(struct gdma_context *gc)
gd->driver_data = hwc;
hwc->gdma_dev = gd;
hwc->dev = gc->dev;
+ hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;
/* HWC's instance number is always 0. */
gd->dev_id.as_uint32 = 0;
@@ -770,6 +789,8 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
hwc->gdma_dev->doorbell = INVALID_DOORBELL;
hwc->gdma_dev->pdid = INVALID_PDID;
+ hwc->hwc_timeout = 0;
+
kfree(hwc);
gc->hwc.driver_data = NULL;
gc->hwc.gdma_context = NULL;
@@ -825,7 +846,8 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
goto out;
}
- if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
+ if (!wait_for_completion_timeout(&ctx->comp_event,
+ msecs_to_jiffies(hwc->hwc_timeout))) {
dev_err(hwc->dev, "HWC: Request timed out!\n");
err = -ETIMEDOUT;
goto out;
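wait_for_completion_timeout() takes its timeout in jiffies, and msecs_to_jiffies() already performs the milliseconds-to-jiffies conversion, so no additional HZ factor belongs in the expression above. A minimal sketch of the pattern (the helper name is illustrative):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Wait up to timeout_ms milliseconds; returns 0 if the wait timed out,
 * otherwise the remaining time in jiffies.
 */
static unsigned long example_wait_ms(struct completion *done, u32 timeout_ms)
{
	return wait_for_completion_timeout(done, msecs_to_jiffies(timeout_ms));
}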
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index c2ad0921e893..4a16ebff3d1d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -12,6 +12,8 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/page_pool/helpers.h>
+#include <net/xdp.h>
#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>
@@ -1387,8 +1389,8 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
recv_buf_oob = &rxq->rx_oobs[curr_index];
- err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
- &recv_buf_oob->wqe_inf);
+ err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
+ &recv_buf_oob->wqe_inf);
if (WARN_ON_ONCE(err))
return;
@@ -1415,8 +1417,8 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return skb;
}
-static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
- struct mana_rxq *rxq)
+static void mana_rx_skb(void *buf_va, bool from_pool,
+ struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
{
struct mana_stats_rx *rx_stats = &rxq->stats;
struct net_device *ndev = rxq->ndev;
@@ -1449,6 +1451,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
if (!skb)
goto drop;
+ if (from_pool)
+ skb_mark_for_recycle(skb);
+
skb->dev = napi->dev;
skb->protocol = eth_type_trans(skb, ndev);
@@ -1499,9 +1504,14 @@ drop_xdp:
u64_stats_update_end(&rx_stats->syncp);
drop:
- WARN_ON_ONCE(rxq->xdp_save_va);
- /* Save for reuse */
- rxq->xdp_save_va = buf_va;
+ if (from_pool) {
+ page_pool_recycle_direct(rxq->page_pool,
+ virt_to_head_page(buf_va));
+ } else {
+ WARN_ON_ONCE(rxq->xdp_save_va);
+ /* Save for reuse */
+ rxq->xdp_save_va = buf_va;
+ }
++ndev->stats.rx_dropped;
@@ -1509,11 +1519,13 @@ drop:
}
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
- dma_addr_t *da, bool is_napi)
+ dma_addr_t *da, bool *from_pool, bool is_napi)
{
struct page *page;
void *va;
+ *from_pool = false;
+
/* Reuse XDP dropped page if available */
if (rxq->xdp_save_va) {
va = rxq->xdp_save_va;
@@ -1534,17 +1546,22 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
return NULL;
}
} else {
- page = dev_alloc_page();
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
return NULL;
+ *from_pool = true;
va = page_to_virt(page);
}
*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, *da)) {
- put_page(virt_to_head_page(va));
+ if (*from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ else
+ put_page(virt_to_head_page(va));
+
return NULL;
}
@@ -1553,21 +1570,25 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
/* Allocate frag for rx buffer, and save the old buf */
static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
- struct mana_recv_buf_oob *rxoob, void **old_buf)
+ struct mana_recv_buf_oob *rxoob, void **old_buf,
+ bool *old_fp)
{
+ bool from_pool;
dma_addr_t da;
void *va;
- va = mana_get_rxfrag(rxq, dev, &da, true);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
if (!va)
return;
dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
DMA_FROM_DEVICE);
*old_buf = rxoob->buf_va;
+ *old_fp = rxoob->from_pool;
rxoob->buf_va = va;
rxoob->sgl[0].address = da;
+ rxoob->from_pool = from_pool;
}
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -1581,6 +1602,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct device *dev = gc->dev;
void *old_buf = NULL;
u32 curr, pktlen;
+ bool old_fp;
apc = netdev_priv(ndev);
@@ -1623,12 +1645,12 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
- mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
+ mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
/* Unsuccessful refill will have old_buf == NULL.
* In this case, mana_rx_skb() will drop the packet.
*/
- mana_rx_skb(old_buf, oob, rxq);
+ mana_rx_skb(old_buf, old_fp, oob, rxq);
drop:
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
@@ -1658,6 +1680,12 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
mana_process_rx_cqe(rxq, cq, &comp[i]);
}
+ if (comp_read > 0) {
+ struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
+
+ mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
+ }
+
if (rxq->xdp_flush)
xdp_do_flush();
}
@@ -1882,6 +1910,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
struct mana_recv_buf_oob *rx_oob;
struct device *dev = gc->dev;
struct napi_struct *napi;
+ struct page *page;
int i;
if (!rxq)
@@ -1914,10 +1943,18 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
dma_unmap_single(dev, rx_oob->sgl[0].address,
rx_oob->sgl[0].size, DMA_FROM_DEVICE);
- put_page(virt_to_head_page(rx_oob->buf_va));
+ page = virt_to_head_page(rx_oob->buf_va);
+
+ if (rx_oob->from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ else
+ put_page(page);
+
rx_oob->buf_va = NULL;
}
+ page_pool_destroy(rxq->page_pool);
+
if (rxq->gdma_rq)
mana_gd_destroy_queue(gc, rxq->gdma_rq);
@@ -1928,18 +1965,20 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
struct mana_rxq *rxq, struct device *dev)
{
struct mana_port_context *mpc = netdev_priv(rxq->ndev);
+ bool from_pool = false;
dma_addr_t da;
void *va;
if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
- va = mana_get_rxfrag(rxq, dev, &da, false);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
if (!va)
return -ENOMEM;
rx_oob->buf_va = va;
+ rx_oob->from_pool = from_pool;
rx_oob->sgl[0].address = da;
rx_oob->sgl[0].size = rxq->datasize;
@@ -2009,6 +2048,26 @@ static int mana_push_wqe(struct mana_rxq *rxq)
return 0;
}
+static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
+{
+ struct page_pool_params pprm = {};
+ int ret;
+
+ pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+ pprm.nid = gc->numa_node;
+ pprm.napi = &rxq->rx_cq.napi;
+
+ rxq->page_pool = page_pool_create(&pprm);
+
+ if (IS_ERR(rxq->page_pool)) {
+ ret = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
u32 rxq_idx, struct mana_eq *eq,
struct net_device *ndev)
@@ -2038,6 +2097,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
&rxq->headroom);
+ /* Create page pool for RX queue */
+ err = mana_create_page_pool(rxq, gc);
+ if (err) {
+ netdev_err(ndev, "Create page pool err:%d\n", err);
+ goto out;
+ }
+
err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
if (err)
goto out;
@@ -2109,8 +2175,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
- WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool));
napi_enable(&cq->napi);
@@ -2229,6 +2295,46 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
return 0;
}
+void mana_query_gf_stats(struct mana_port_context *apc)
+{
+ struct mana_query_gf_stat_resp resp = {};
+ struct mana_query_gf_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
+ sizeof(req), sizeof(resp));
+ req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
+ STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
+ STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
+ STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
+ STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
+ STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
+ STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to query GF stats: %d\n", err);
+ return;
+ }
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return;
+ }
+
+ apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
+ apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+ apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+ apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+ apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+ apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+ apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+}
+
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
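[Editor's note] The mana_en.c hunks above convert MANA RX buffers from dev_alloc_page() to a per-queue page pool. As a rough, hypothetical sketch of the lifecycle the new code follows (names such as my_rxq are illustrative, not the driver's; the include path varies by kernel version):

#include <linux/err.h>
#include <net/page_pool.h>	/* <net/page_pool/helpers.h> on newer trees */

struct my_rxq {
	struct page_pool *pool;
	struct napi_struct *napi;
};

/* One pool per RX queue, sized to the ring and bound to its NAPI context
 * so pages can be recycled without locking from softirq.
 */
static int my_rxq_pool_create(struct my_rxq *rxq, int nid, unsigned int ring_size)
{
	struct page_pool_params pprm = {
		.pool_size = ring_size,
		.nid = nid,
		.napi = rxq->napi,
	};

	rxq->pool = page_pool_create(&pprm);
	if (IS_ERR(rxq->pool))
		return PTR_ERR(rxq->pool);
	return 0;
}

/* Refill path: pool pages replace dev_alloc_page(); skbs built from them are
 * marked with skb_mark_for_recycle() so the stack returns the page to the
 * pool, while dropped buffers are recycled directly as below.
 */
static void my_rxq_drop_page(struct my_rxq *rxq, struct page *page)
{
	page_pool_recycle_direct(rxq->pool, page);
}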
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 0dc78679f620..607150165ab4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,19 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+ {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
+ {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_tx_ucast_pkts)},
+ {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_tx_ucast_bytes)},
+ {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_tx_bcast_pkts)},
+ {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_tx_bcast_bytes)},
+ {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_tx_mcast_pkts)},
+ {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_tx_mcast_bytes)},
{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
tx_cqe_unknown_type)},
@@ -114,6 +127,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
if (!apc->port_is_up)
return;
+ /* Call the mana function to update stats from GDMA */
+ mana_query_gf_stats(apc);
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 87f2055c242c..e50be508c166 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -97,8 +97,6 @@ int ocelot_netdev_to_port(struct net_device *dev);
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct device_node *portnp);
void ocelot_release_port(struct ocelot_port *ocelot_port);
-int ocelot_devlink_init(struct ocelot *ocelot);
-void ocelot_devlink_teardown(struct ocelot *ocelot);
int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour);
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
index 83a3ce0c568e..312a46832154 100644
--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -12,7 +12,6 @@
#include <linux/dmapool.h>
#include <linux/dsa/ocelot.h>
#include <linux/netdevice.h>
-#include <linux/of_platform.h>
#include <linux/skbuff.h>
#include "ocelot_fdma.h"
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index e0916afcddfb..33b438c6aec5 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -581,14 +581,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
int ret;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_META) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_META) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
return -EOPNOTSUPP;
}
@@ -641,12 +641,12 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
* then just bail out
*/
if ((dissector->used_keys &
- (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL))) !=
- (BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL)))
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) !=
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)))
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
index 523611ccc48f..6f546695faa5 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -15,7 +15,6 @@
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
-void ocelot_detect_vcap_constants(struct ocelot *ocelot);
int ocelot_vcap_init(struct ocelot *ocelot);
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 97e90e2869d4..151b42465348 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -10,8 +10,9 @@
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
#include <net/switchdev.h>
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 700c05fb05b9..61d8bfd12d5f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5091,13 +5091,10 @@ static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
{
int i;
- u64 mac_addr = 0;
+ u64 mac_addr;
struct config_param *config = &sp->config;
- for (i = 0; i < ETH_ALEN; i++) {
- mac_addr <<= 8;
- mac_addr |= addr[i];
- }
+ mac_addr = ether_addr_to_u64(addr);
if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
return SUCCESS;
@@ -5220,7 +5217,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
{
struct s2io_nic *sp = netdev_priv(dev);
- register u64 mac_addr = 0, perm_addr = 0;
+ register u64 mac_addr, perm_addr;
int i;
u64 tmp64;
struct config_param *config = &sp->config;
@@ -5230,12 +5227,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
* change on the device address registered with the OS. It will be
* at offset 0.
*/
- for (i = 0; i < ETH_ALEN; i++) {
- mac_addr <<= 8;
- mac_addr |= addr[i];
- perm_addr <<= 8;
- perm_addr |= sp->def_mac_addr[0].mac_addr[i];
- }
+ mac_addr = ether_addr_to_u64(addr);
+ perm_addr = ether_addr_to_u64(sp->def_mac_addr[0].mac_addr);
/* check if the dev_addr is different than perm_addr */
if (mac_addr == perm_addr)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 73032173ac4e..2643c4b3ff1f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -61,7 +61,7 @@ bool is_pre_ct_flow(struct flow_cls_offload *flow)
struct flow_match_ct ct;
int i;
- if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
flow_rule_match_ct(rule, &ct);
if (ct.key->ct_state)
return false;
@@ -94,7 +94,7 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
struct flow_match_ct ct;
int i;
- if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
flow_rule_match_ct(rule, &ct);
if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
return true;
@@ -236,10 +236,11 @@ static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
struct nfp_fl_ct_flow_entry *entry2)
{
- unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
- entry2->rule->match.dissector->used_keys;
+ unsigned long long ovlp_keys;
bool out, is_v6 = false;
u8 ip_proto = 0;
+ ovlp_keys = entry1->rule->match.dissector->used_keys &
+ entry2->rule->match.dissector->used_keys;
/* Temporary buffer for mangling keys, 64 is enough to cover max
* struct size of key in various fields that may be mangled.
* Supported fields to mangle:
@@ -257,7 +258,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
/* Check the overlapped fields one by one, the unmasked part
* should not conflict with each other.
*/
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match1, match2;
flow_rule_match_control(entry1->rule, &match1);
@@ -267,7 +268,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match1, match2;
flow_rule_match_basic(entry1->rule, &match1);
@@ -289,7 +290,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
* will be do merge check when do nft and post ct merge,
* so skip this ip merge check here.
*/
- if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
+ if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv4_addrs match1, match2;
@@ -311,7 +312,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
* will be do merge check when do nft and post ct merge,
* so skip this ip merge check here.
*/
- if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
+ if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv6_addrs match1, match2;
@@ -333,7 +334,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
* will be do merge check when do nft and post ct merge,
* so skip this tport merge check here.
*/
- if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) &&
+ if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_PORTS)) &&
nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
struct flow_match_ports match1, match2;
@@ -355,7 +356,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match1, match2;
flow_rule_match_eth_addrs(entry1->rule, &match1);
@@ -371,7 +372,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match1, match2;
flow_rule_match_vlan(entry1->rule, &match1);
@@ -381,7 +382,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match1, match2;
flow_rule_match_mpls(entry1->rule, &match1);
@@ -391,7 +392,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_TCP)) {
struct flow_match_tcp match1, match2;
flow_rule_match_tcp(entry1->rule, &match1);
@@ -401,7 +402,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match1, match2;
flow_rule_match_ip(entry1->rule, &match1);
@@ -413,7 +414,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
struct flow_match_enc_keyid match1, match2;
flow_rule_match_enc_keyid(entry1->rule, &match1);
@@ -423,7 +424,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match1, match2;
flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
@@ -433,7 +434,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match1, match2;
flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
@@ -443,7 +444,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_match_control match1, match2;
flow_rule_match_enc_control(entry1->rule, &match1);
@@ -453,7 +454,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP)) {
struct flow_match_ip match1, match2;
flow_rule_match_enc_ip(entry1->rule, &match1);
@@ -463,7 +464,7 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
struct flow_match_enc_opts match1, match2;
flow_rule_match_enc_opts(entry1->rule, &match1);
@@ -589,7 +590,7 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
int i;
ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
- if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
+ if (ct_met && (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT))) {
u32 *act_lbl;
act_lbl = ct_met->ct_metadata.labels;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 18328eb7f5c3..c153f0575b92 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -24,43 +24,43 @@
FLOW_DIS_FIRST_FRAG)
#define NFP_FLOWER_WHITELIST_DISSECTOR \
- (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_BASIC) | \
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_TCP) | \
- BIT(FLOW_DISSECTOR_KEY_PORTS) | \
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_VLAN) | \
- BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
- BIT(FLOW_DISSECTOR_KEY_MPLS) | \
- BIT(FLOW_DISSECTOR_KEY_CT) | \
- BIT(FLOW_DISSECTOR_KEY_META) | \
- BIT(FLOW_DISSECTOR_KEY_IP))
+ (BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_CT) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_META) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
- (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IP))
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
- (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
- (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
#define NFP_FLOWER_MERGE_FIELDS \
(NFP_FLOWER_LAYER_PORT | \
@@ -1303,7 +1303,7 @@ static bool offload_pre_check(struct flow_cls_offload *flow)
struct flow_dissector *dissector = rule->match.dissector;
struct flow_match_ct ct;
- if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
flow_rule_match_ct(rule, &ct);
/* Allow special case where CT match is all 0 */
if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
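[Editor's note] The BIT() -> BIT_ULL() conversions above follow the widening of the flow dissector's used_keys field to 64 bits; masks built with plain BIT() would overflow once key numbers reach 32. A minimal, hypothetical check (not driver code) showing the intended usage:

#include <linux/bits.h>
#include <net/flow_dissector.h>

/* used_keys is now unsigned long long, so the mask must be built with
 * BIT_ULL() to stay 64 bits wide on all architectures.
 */
static bool my_only_basic_keys(const struct flow_dissector *dissector)
{
	unsigned long long allowed = BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
				     BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);

	return !(dissector->used_keys & ~allowed);
}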
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6b1fb5708434..de0a5d5ded30 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -924,7 +924,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
*/
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
- u32 new_ctrl, update;
+ u32 new_ctrl, new_ctrl_w1, update;
unsigned int r;
int err;
@@ -937,14 +937,29 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
- nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
- nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)) {
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+ }
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
if (err)
nn_err(nn, "Could not disable device: %d\n", err);
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
+ new_ctrl_w1 = nn->dp.ctrl_w1;
+ new_ctrl_w1 &= ~NFP_NET_CFG_CTRL_FREELIST_EN;
+ nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
+ nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
+
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
+ err = nfp_net_reconfig(nn, update);
+ if (err)
+ nn_err(nn, "Could not disable FREELIST_EN: %d\n", err);
+ nn->dp.ctrl_w1 = new_ctrl_w1;
+ }
+
for (r = 0; r < nn->dp.num_rx_rings; r++) {
nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
@@ -964,11 +979,12 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
*/
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
- u32 bufsz, new_ctrl, update = 0;
+ u32 bufsz, new_ctrl, new_ctrl_w1, update = 0;
unsigned int r;
int err;
new_ctrl = nn->dp.ctrl;
+ new_ctrl_w1 = nn->dp.ctrl_w1;
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
nfp_net_rss_write_key(nn);
@@ -1001,16 +1017,25 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
- /* Enable device */
- new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
+ /* Enable device
+ * Step 1: Replace CTRL_ENABLE with NFP_NET_CFG_CTRL_FREELIST_EN if
+ * FREELIST_EN exists.
+ */
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN)
+ new_ctrl_w1 |= NFP_NET_CFG_CTRL_FREELIST_EN;
+ else
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
update |= NFP_NET_CFG_UPDATE_GEN;
update |= NFP_NET_CFG_UPDATE_MSIX;
update |= NFP_NET_CFG_UPDATE_RING;
if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
+ /* Step 2: Send the configuration and write the freelist.
+ * - The freelist only needs to be written once.
+ */
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
- nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, nn->dp.ctrl_w1);
+ nn_writel(nn, NFP_NET_CFG_CTRL_WORD1, new_ctrl_w1);
err = nfp_net_reconfig(nn, update);
if (err) {
nfp_net_clear_config_and_disable(nn);
@@ -1018,10 +1043,25 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
}
nn->dp.ctrl = new_ctrl;
+ nn->dp.ctrl_w1 = new_ctrl_w1;
for (r = 0; r < nn->dp.num_rx_rings; r++)
nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
+ /* Step 3: Set NFP_NET_CFG_CTRL_ENABLE and send the configuration.
+ */
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_FREELIST_EN) {
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
+ nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
+
+ err = nfp_net_reconfig(nn, update);
+ if (err) {
+ nfp_net_clear_config_and_disable(nn);
+ return err;
+ }
+ nn->dp.ctrl = new_ctrl;
+ }
+
return 0;
}
@@ -2068,9 +2108,6 @@ static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
- if (nla_len(attr) < sizeof(mode))
- return -EINVAL;
-
new_ctrl = nn->dp.ctrl;
mode = nla_get_u16(attr);
if (mode == BRIDGE_MODE_VEPA)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 669b9dccb6a9..3e63f6d6a563 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -268,6 +268,7 @@
#define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */
#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
+#define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */
#define NFP_NET_CFG_CAP_WORD1 0x00a4
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 0fd156286d4d..ba27bbc68f85 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -7,11 +7,10 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 602f4d45d529..2453a40f6ee8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -81,7 +81,6 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
void ionic_dev_cmd_dev_err_print(struct ionic *ionic, u8 opcode, u8 status,
int err);
-int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
int ionic_identify(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index ab7d217b98b3..d6ce113a4210 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -213,29 +213,18 @@ out:
return ret;
}
-static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static void ionic_clear_pci(struct ionic *ionic)
{
- struct device *dev = &pdev->dev;
- struct ionic *ionic;
- int num_vfs;
- int err;
-
- ionic = ionic_devlink_alloc(dev);
- if (!ionic)
- return -ENOMEM;
-
- ionic->pdev = pdev;
- ionic->dev = dev;
- pci_set_drvdata(pdev, ionic);
- mutex_init(&ionic->dev_cmd_lock);
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ pci_disable_device(ionic->pdev);
+}
- /* Query system for DMA addressing limitation for the device. */
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
- if (err) {
- dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n",
- err);
- goto err_out_clear_drvdata;
- }
+static int ionic_setup_one(struct ionic *ionic)
+{
+ struct pci_dev *pdev = ionic->pdev;
+ struct device *dev = ionic->dev;
+ int err;
ionic_debugfs_add_dev(ionic);
@@ -249,20 +238,19 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_request_regions(pdev, IONIC_DRV_NAME);
if (err) {
dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err);
- goto err_out_pci_disable_device;
+ goto err_out_clear_pci;
}
-
pcie_print_link_status(pdev);
err = ionic_map_bars(ionic);
if (err)
- goto err_out_pci_release_regions;
+ goto err_out_clear_pci;
/* Configure the device */
err = ionic_setup(ionic);
if (err) {
dev_err(dev, "Cannot setup device: %d, aborting\n", err);
- goto err_out_unmap_bars;
+ goto err_out_clear_pci;
}
pci_set_master(pdev);
@@ -279,24 +267,64 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_teardown;
}
- /* Configure the ports */
+ /* Configure the port */
err = ionic_port_identify(ionic);
if (err) {
dev_err(dev, "Cannot identify port: %d, aborting\n", err);
- goto err_out_reset;
+ goto err_out_teardown;
}
err = ionic_port_init(ionic);
if (err) {
dev_err(dev, "Cannot init port: %d, aborting\n", err);
- goto err_out_reset;
+ goto err_out_teardown;
+ }
+
+ return 0;
+
+err_out_teardown:
+ ionic_dev_teardown(ionic);
+err_out_clear_pci:
+ ionic_clear_pci(ionic);
+err_out_debugfs_del_dev:
+ ionic_debugfs_del_dev(ionic);
+
+ return err;
+}
+
+static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ionic *ionic;
+ int num_vfs;
+ int err;
+
+ ionic = ionic_devlink_alloc(dev);
+ if (!ionic)
+ return -ENOMEM;
+
+ ionic->pdev = pdev;
+ ionic->dev = dev;
+ pci_set_drvdata(pdev, ionic);
+ mutex_init(&ionic->dev_cmd_lock);
+
+ /* Query system for DMA addressing limitation for the device. */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
+ if (err) {
+ dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n",
+ err);
+ goto err_out;
}
+ err = ionic_setup_one(ionic);
+ if (err)
+ goto err_out;
+
/* Allocate and init the LIF */
err = ionic_lif_size(ionic);
if (err) {
dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
- goto err_out_port_reset;
+ goto err_out_pci;
}
err = ionic_lif_alloc(ionic);
@@ -347,21 +375,10 @@ err_out_free_lifs:
ionic->lif = NULL;
err_out_free_irqs:
ionic_bus_free_irq_vectors(ionic);
-err_out_port_reset:
- ionic_port_reset(ionic);
-err_out_reset:
- ionic_reset(ionic);
-err_out_teardown:
+err_out_pci:
ionic_dev_teardown(ionic);
-err_out_unmap_bars:
- ionic_unmap_bars(ionic);
-err_out_pci_release_regions:
- pci_release_regions(pdev);
-err_out_pci_disable_device:
- pci_disable_device(pdev);
-err_out_debugfs_del_dev:
- ionic_debugfs_del_dev(ionic);
-err_out_clear_drvdata:
+ ionic_clear_pci(ionic);
+err_out:
mutex_destroy(&ionic->dev_cmd_lock);
ionic_devlink_free(ionic);
@@ -386,20 +403,71 @@ static void ionic_remove(struct pci_dev *pdev)
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
- ionic_unmap_bars(ionic);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
+ ionic_clear_pci(ionic);
ionic_debugfs_del_dev(ionic);
mutex_destroy(&ionic->dev_cmd_lock);
ionic_devlink_free(ionic);
}
+static void ionic_reset_prepare(struct pci_dev *pdev)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct ionic_lif *lif = ionic->lif;
+
+ dev_dbg(ionic->dev, "%s: device stopping\n", __func__);
+
+ del_timer_sync(&ionic->watchdog_timer);
+ cancel_work_sync(&lif->deferred.work);
+
+ mutex_lock(&lif->queue_lock);
+ ionic_stop_queues_reconfig(lif);
+ ionic_txrx_free(lif);
+ ionic_lif_deinit(lif);
+ ionic_qcqs_free(lif);
+ mutex_unlock(&lif->queue_lock);
+
+ ionic_dev_teardown(ionic);
+ ionic_clear_pci(ionic);
+ ionic_debugfs_del_dev(ionic);
+}
+
+static void ionic_reset_done(struct pci_dev *pdev)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct ionic_lif *lif = ionic->lif;
+ int err;
+
+ err = ionic_setup_one(ionic);
+ if (err)
+ goto err_out;
+
+ ionic_debugfs_add_sizes(ionic);
+ ionic_debugfs_add_lif(ionic->lif);
+
+ err = ionic_restart_lif(lif);
+ if (err)
+ goto err_out;
+
+ mod_timer(&ionic->watchdog_timer, jiffies + 1);
+
+err_out:
+ dev_dbg(ionic->dev, "%s: device recovery %s\n",
+ __func__, err ? "failed" : "done");
+}
+
+static const struct pci_error_handlers ionic_err_handler = {
+ /* FLR handling */
+ .reset_prepare = ionic_reset_prepare,
+ .reset_done = ionic_reset_done,
+};
+
static struct pci_driver ionic_driver = {
.name = IONIC_DRV_NAME,
.id_table = ionic_id_table,
.probe = ionic_probe,
.remove = ionic_remove,
.sriov_configure = ionic_sriov_configure,
+ .err_handler = &ionic_err_handler
};
int ionic_bus_register_driver(void)
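[Editor's note] The ionic hunks above register pci_error_handlers so the driver can tear down and rebuild its state around a function-level reset. A hedged skeleton of the same pattern (handler names here are generic, not ionic's):

#include <linux/pci.h>

/* The PCI core calls reset_prepare before it issues the FLR and reset_done
 * after the function comes back, so the driver quiesces and releases the
 * device in the first hook and re-initializes it in the second.
 */
static void my_reset_prepare(struct pci_dev *pdev)
{
	/* stop queues, free rings, unmap BARs, disable the device */
}

static void my_reset_done(struct pci_dev *pdev)
{
	/* re-map BARs, re-run the setup path, restart the interface */
}

static const struct pci_error_handlers my_err_handler = {
	.reset_prepare = my_reset_prepare,
	.reset_done = my_reset_done,
};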
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 0bea208bfba2..6aac98bcb9f4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -376,7 +376,6 @@ void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg);
-void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
int ionic_heartbeat_check(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 432fb93aa801..2c3e36b2dd7f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -434,7 +434,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
}
}
-static void ionic_qcqs_free(struct ionic_lif *lif)
+void ionic_qcqs_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
struct ionic_qcq *adminqcq;
@@ -1754,7 +1754,7 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
return ionic_lif_addr_add(netdev_priv(netdev), mac);
}
-static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
+void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
/* Stop and clean the queues before reconfiguration */
netif_device_detach(lif->netdev);
@@ -2013,7 +2013,7 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
}
}
-static void ionic_txrx_free(struct ionic_lif *lif)
+void ionic_txrx_free(struct ionic_lif *lif)
{
unsigned int i;
@@ -3275,27 +3275,11 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
-static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
+int ionic_restart_lif(struct ionic_lif *lif)
{
struct ionic *ionic = lif->ionic;
int err;
- if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
- return;
-
- dev_info(ionic->dev, "FW Up: restarting LIFs\n");
-
- ionic_init_devinfo(ionic);
- err = ionic_identify(ionic);
- if (err)
- goto err_out;
- err = ionic_port_identify(ionic);
- if (err)
- goto err_out;
- err = ionic_port_init(ionic);
- if (err)
- goto err_out;
-
mutex_lock(&lif->queue_lock);
if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
@@ -3331,12 +3315,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
- dev_info(ionic->dev, "FW Up: LIFs restarted\n");
-
- /* restore the hardware timestamping queues */
- ionic_lif_hwstamp_replay(lif);
- return;
+ return 0;
err_txrx_free:
ionic_txrx_free(lif);
@@ -3346,6 +3326,46 @@ err_qcqs_free:
ionic_qcqs_free(lif);
err_unlock:
mutex_unlock(&lif->queue_lock);
+
+ return err;
+}
+
+static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
+ dev_info(ionic->dev, "FW Up: restarting LIFs\n");
+
+ /* This is a little different from what happens at
+ * probe time because the LIF already exists so we
+ * just need to reanimate it.
+ */
+ ionic_init_devinfo(ionic);
+ err = ionic_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_init(ionic);
+ if (err)
+ goto err_out;
+
+ err = ionic_restart_lif(lif);
+ if (err)
+ goto err_out;
+
+ dev_info(ionic->dev, "FW Up: LIFs restarted\n");
+
+ /* restore the hardware timestamping queues */
+ ionic_lif_hwstamp_replay(lif);
+
+ return;
+
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index fd2ea670e7d8..457c24195ca6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -325,6 +325,11 @@ void ionic_lif_deinit(struct ionic_lif *lif);
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
+void ionic_stop_queues_reconfig(struct ionic_lif *lif);
+void ionic_txrx_free(struct ionic_lif *lif);
+void ionic_qcqs_free(struct ionic_lif *lif);
+int ionic_restart_lif(struct ionic_lif *lif);
+
int ionic_lif_register(struct ionic_lif *lif);
void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index 87b2666f248b..ee9e99cd1b5e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -43,7 +43,6 @@ struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8
struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif);
void ionic_rx_filter_sync(struct ionic_lif *lif);
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode);
-int ionic_rx_filters_need_sync(struct ionic_lif *lif);
int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid);
int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d613095b78e0..1d719726f72b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -909,7 +909,6 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
u32 min_pf_rate);
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_device_num_engines(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 7b0e390c0b07..0e265ed1f501 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -60,7 +60,7 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
#define QED_VF_CHANNEL_MSLEEP_DELAY 25
-static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
@@ -72,9 +72,6 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
/* output tlvs list */
qed_dp_tlv_list(p_hwfn, p_req);
- /* need to add the END TLV to the message size */
- resp_size += sizeof(struct channel_list_end_tlv);
-
/* Send TLVs over HW channel */
memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
@@ -172,7 +169,7 @@ static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
@@ -301,7 +298,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
/* send acquire request */
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
/* Re-try acquire in case of vf-pf hw channel timeout */
if (retry_cnt && rc == -EBUSY) {
@@ -705,7 +702,7 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
- rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status);
if (rc)
goto exit;
@@ -772,7 +769,7 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->queue_start;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -822,7 +819,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -867,7 +864,7 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->queue_start;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -918,7 +915,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -968,7 +965,7 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -997,7 +994,7 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1075,12 +1072,10 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct vfpf_vport_update_tlv *req;
struct pfvf_def_resp_tlv *resp;
u8 update_rx, update_tx;
- u32 resp_size = 0;
u16 size, tlv;
int rc;
resp = &p_iov->pf2vf_reply->default_resp;
- resp_size = sizeof(*resp);
update_rx = p_params->update_vport_active_rx_flg;
update_tx = p_params->update_vport_active_tx_flg;
@@ -1096,7 +1091,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
p_act_tlv->update_rx = update_rx;
@@ -1116,7 +1110,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
tlv, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
}
@@ -1127,7 +1120,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
memcpy(p_mcast_tlv->bins, p_params->bins,
sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
@@ -1142,7 +1134,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
size = sizeof(struct vfpf_vport_update_accept_param_tlv);
p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
p_accept_tlv->update_rx_mode = update_rx;
@@ -1166,7 +1157,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
p_rss_tlv = qed_add_tlv(p_hwfn,
&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_RSS, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
if (rss_params->update_rss_config)
p_rss_tlv->update_rss_flags |=
@@ -1203,7 +1193,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
p_any_vlan_tlv->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
@@ -1213,7 +1202,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1245,7 +1234,7 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1303,7 +1292,7 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1332,7 +1321,7 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1364,7 +1353,7 @@ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->read_coal_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1402,7 +1391,7 @@ qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
p_resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status);
qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -1433,7 +1422,7 @@ qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 4d83ceebdc49..042a75f34060 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -556,9 +556,6 @@ void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
struct qed_update_vport_rss_params *rss, u8 *update);
-void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
-void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
-
int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#ifdef CONFIG_DCB
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 3010833ddde3..a5ac21a0ee33 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1827,12 +1827,12 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
memset(tuple, 0, sizeof(*tuple));
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
- DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
+ DP_NOTICE(edev, "Unsupported key set:0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 802ef81493e0..e4bc18009d08 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -8,7 +8,9 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/acpi.h>
+#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-sgmii.h"
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index eaa50050aa0b..19bb16daf4e7 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 4a1b94e5a8ea..bec723028e96 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,7 +35,6 @@
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 26646cb6a20a..9adec91f35e9 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/sched.h>
#include <linux/serdev.h>
@@ -404,7 +403,7 @@ static struct serdev_device_driver qca_uart_driver = {
.remove = qca_uart_remove,
.driver = {
.name = QCAUART_DRV_NAME,
- .of_match_table = of_match_ptr(qca_uart_of_match),
+ .of_match_table = qca_uart_of_match,
},
};
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4d6b3b7d6abb..7df9f9f8e134 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -21,10 +21,9 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 4e412ac0965a..6083b1c8e4fb 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -12,15 +12,15 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include "rswitch.h"
@@ -1244,7 +1244,6 @@ static void rswitch_adjust_link(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
- /* Current hardware has a restriction not to change speed at runtime */
if (phydev->link != rdev->etha->link) {
phy_print_status(phydev);
if (phydev->link)
@@ -1253,13 +1252,23 @@ static void rswitch_adjust_link(struct net_device *ndev)
phy_power_off(rdev->serdes);
rdev->etha->link = phydev->link;
+
+ if (!rdev->priv->etha_no_runtime_change &&
+ phydev->speed != rdev->etha->speed) {
+ rdev->etha->speed = phydev->speed;
+
+ rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
+ phy_set_speed(rdev->serdes, rdev->etha->speed);
+ }
}
}
static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
struct phy_device *phydev)
{
- /* Current hardware has a restriction not to change speed at runtime */
+ if (!rdev->priv->etha_no_runtime_change)
+ return;
+
switch (rdev->etha->speed) {
case SPEED_2500:
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
@@ -1348,7 +1357,8 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
if (err < 0)
return err;
- rdev->etha->operated = true;
+ if (rdev->priv->etha_no_runtime_change)
+ rdev->etha->operated = true;
}
err = rswitch_mii_register(rdev);
@@ -1654,6 +1664,8 @@ static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *
static const struct ethtool_ops rswitch_ethtool_ops = {
.get_ts_info = rswitch_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct of_device_id renesas_eth_sw_of_table[] = {
@@ -1854,8 +1866,14 @@ err_ts_queue_alloc:
return err;
}
+static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
+ { .soc_id = "r8a779f0", .revision = "ES1.0" },
+ { /* Sentinel */ }
+};
+
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *attr;
struct rswitch_private *priv;
struct resource *res;
int ret;
@@ -1870,6 +1888,10 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ attr = soc_device_match(rswitch_soc_no_speed_change);
+ if (attr)
+ priv->etha_no_runtime_change = true;
+
priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
if (!priv->ptp_priv)
return -ENOMEM;
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index bb9ed971a97c..54f397effbc6 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -1011,6 +1011,7 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ bool etha_no_runtime_change;
bool gwca_halt;
};
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d8ec729825be..274ea16c0a1f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -19,8 +19,6 @@
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 16293b58e0a8..8f446b9bd5ee 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -11,7 +11,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
- tc_encap_actions.o
+ tc_encap_actions.o tc_conntrack.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 1f981dfe4bdc..89665fc9b8d0 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -26,6 +26,8 @@
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
#define EFX_WORD_0_LBN 0
#define EFX_WORD_0_WIDTH 16
#define EFX_WORD_1_LBN 16
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 8c019f382a7f..6dfa062feebc 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2209,7 +2209,7 @@ static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
/* low two bits of label are what we want for type */
BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3);
tx_queue->type = tx_queue->label & 3;
- return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd,
(tx_queue->ptr_mask + 1) *
sizeof(efx_qword_t),
GFP_KERNEL);
@@ -4267,8 +4267,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.sriov_init = efx_ef10_sriov_init,
.sriov_fini = efx_ef10_sriov_fini,
.sriov_wanted = efx_ef10_sriov_wanted,
- .sriov_reset = efx_ef10_sriov_reset,
- .sriov_flr = efx_ef10_sriov_flr,
.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 35d8e9811998..6da06931187d 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -224,7 +224,7 @@ int efx_ef100_init_datapath_caps(struct efx_nic *efx)
static int ef100_ev_probe(struct efx_channel *channel)
{
/* Allocate an extra descriptor for the QMDA status completion entry */
- return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq,
(channel->eventq_mask + 2) *
sizeof(efx_qword_t),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 849e5555bd12..e6b6be549581 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -23,7 +23,7 @@
int ef100_tx_probe(struct efx_tx_queue *tx_queue)
{
/* Allocate an extra descriptor for the QMDA status completion entry */
- return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd,
(tx_queue->ptr_mask + 2) *
sizeof(efx_oword_t),
GFP_KERNEL);
@@ -101,8 +101,8 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
- if (likely(tx_queue->txd.buf.addr))
- return ((efx_oword_t *)tx_queue->txd.buf.addr) + index;
+ if (likely(tx_queue->txd.addr))
+ return ((efx_oword_t *)tx_queue->txd.addr) + index;
else
return NULL;
}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index 3c703ca878b0..be419c9c5dec 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -35,9 +35,7 @@ static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
int efx_ef10_sriov_init(struct efx_nic *efx);
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
void efx_ef10_sriov_fini(struct efx_nic *efx);
-static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d670a319b379..19f4b4d0b851 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -605,7 +605,6 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
- .ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 4239c7ece123..48d3623735ba 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,8 +30,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct
tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
-int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- void *type_data);
extern unsigned int efx_piobuf_size;
/* RX */
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 41b33a75333c..8d2d7ea2ebef 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -713,9 +713,6 @@ int efx_probe_channels(struct efx_nic *efx)
struct efx_channel *channel;
int rc;
- /* Restart special buffer allocation */
- efx->next_buffer_table = 0;
-
/* Probe channels in reverse, so that any 'extra' channels
* use the start of the buffer table. This allows the traffic
* channels to be resized without moving them or wasting the
@@ -849,36 +846,14 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
*ptp_channel = efx_ptp_channel(efx);
struct efx_ptp_data *ptp_data = efx->ptp_data;
- unsigned int i, next_buffer_table = 0;
u32 old_rxq_entries, old_txq_entries;
+ unsigned int i;
int rc, rc2;
rc = efx_check_disabled(efx);
if (rc)
return rc;
- /* Not all channels should be reallocated. We must avoid
- * reallocating their buffer table entries.
- */
- efx_for_each_channel(channel, efx) {
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
-
- if (channel->type->copy)
- continue;
- next_buffer_table = max(next_buffer_table,
- channel->eventq.index +
- channel->eventq.entries);
- efx_for_each_channel_rx_queue(rx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- rx_queue->rxd.index +
- rx_queue->rxd.entries);
- efx_for_each_channel_tx_queue(tx_queue, channel)
- next_buffer_table = max(next_buffer_table,
- tx_queue->txd.index +
- tx_queue->txd.entries);
- }
-
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_soft_disable_interrupts(efx);
@@ -904,9 +879,6 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
for (i = 0; i < efx->n_channels; i++)
swap(efx->channel[i], other_channel[i]);
- /* Restart buffer table allocation */
- efx->next_buffer_table = next_buffer_table;
-
for (i = 0; i < efx->n_channels; i++) {
channel = efx->channel[i];
if (!channel->type->copy)
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 361687de308d..175bd9cdfdac 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -35,11 +35,6 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
/* This is the time (in jiffies) between invocations of the hardware
* monitor.
- * On Falcon-based NICs, this will:
- * - Check the on-board hardware monitor;
- * - Poll the link state and reconfigure the hardware as necessary.
- * On Siena-based NICs for power systems with EEH support, this will give EEH a
- * chance to start.
*/
static unsigned int efx_monitor_interval = 1 * HZ;
@@ -785,8 +780,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
mutex_unlock(&efx->rss_lock);
efx->type->filter_table_restore(efx);
up_write(&efx->filter_sem);
- if (efx->type->sriov_reset)
- efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/farch_regs.h b/drivers/net/ethernet/sfc/farch_regs.h
deleted file mode 100644
index d138be423e63..000000000000
--- a/drivers/net/ethernet/sfc/farch_regs.h
+++ /dev/null
@@ -1,2929 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/****************************************************************************
- * Driver for Solarflare network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2012 Solarflare Communications Inc.
- */
-
-#ifndef EFX_FARCH_REGS_H
-#define EFX_FARCH_REGS_H
-
-/*
- * Falcon hardware architecture definitions have a name prefix following
- * the format:
- *
- * F<type>_<min-rev><max-rev>_
- *
- * The following <type> strings are used:
- *
- * MMIO register MC register Host memory structure
- * -------------------------------------------------------------
- * Address R MCR
- * Bitfield RF MCRF SF
- * Enumerator FE MCFE SE
- *
- * <min-rev> is the first revision to which the definition applies:
- *
- * A: Falcon A1 (SFC4000AB)
- * B: Falcon B0 (SFC4000BA)
- * C: Siena A0 (SFL9021AA)
- *
- * If the definition has been changed or removed in later revisions
- * then <max-rev> is the last revision to which the definition applies;
- * otherwise it is "Z".
- */
-
-/**************************************************************************
- *
- * Falcon/Siena registers and descriptors
- *
- **************************************************************************
- */
-
-/* ADR_REGION_REG: Address region register */
-#define FR_AZ_ADR_REGION 0x00000000
-#define FRF_AZ_ADR_REGION3_LBN 96
-#define FRF_AZ_ADR_REGION3_WIDTH 18
-#define FRF_AZ_ADR_REGION2_LBN 64
-#define FRF_AZ_ADR_REGION2_WIDTH 18
-#define FRF_AZ_ADR_REGION1_LBN 32
-#define FRF_AZ_ADR_REGION1_WIDTH 18
-#define FRF_AZ_ADR_REGION0_LBN 0
-#define FRF_AZ_ADR_REGION0_WIDTH 18
-
-/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
-#define FR_AZ_INT_EN_KER 0x00000010
-#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
-#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
-#define FRF_AZ_KER_INT_CHAR_LBN 4
-#define FRF_AZ_KER_INT_CHAR_WIDTH 1
-#define FRF_AZ_KER_INT_KER_LBN 3
-#define FRF_AZ_KER_INT_KER_WIDTH 1
-#define FRF_AZ_DRV_INT_EN_KER_LBN 0
-#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
-
-/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
-#define FR_BZ_INT_EN_CHAR 0x00000020
-#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
-#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
-#define FRF_BZ_CHAR_INT_CHAR_LBN 4
-#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
-#define FRF_BZ_CHAR_INT_KER_LBN 3
-#define FRF_BZ_CHAR_INT_KER_WIDTH 1
-#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
-#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
-
-/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
-#define FR_AZ_INT_ADR_KER 0x00000030
-#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
-#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
-#define FRF_AZ_INT_ADR_KER_LBN 0
-#define FRF_AZ_INT_ADR_KER_WIDTH 64
-
-/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
-#define FR_BZ_INT_ADR_CHAR 0x00000040
-#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
-#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
-#define FRF_BZ_INT_ADR_CHAR_LBN 0
-#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
-
-/* INT_ACK_KER: Kernel interrupt acknowledge register */
-#define FR_AA_INT_ACK_KER 0x00000050
-#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
-#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
-
-/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
-#define FR_BZ_INT_ISR0 0x00000090
-#define FRF_BZ_INT_ISR_REG_LBN 0
-#define FRF_BZ_INT_ISR_REG_WIDTH 64
-
-/* HW_INIT_REG: Hardware initialization register */
-#define FR_AZ_HW_INIT 0x000000c0
-#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
-#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
-#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
-#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
-#define FRF_CZ_TX_MRG_TAGS_LBN 120
-#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
-#define FRF_AB_TRGT_MASK_ALL_LBN 100
-#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
-#define FRF_AZ_DOORBELL_DROP_LBN 92
-#define FRF_AZ_DOORBELL_DROP_WIDTH 8
-#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
-#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
-#define FRF_AB_PE_EIDLE_DIS_LBN 75
-#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
-#define FRF_AA_FC_BLOCKING_EN_LBN 45
-#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
-#define FRF_BZ_B2B_REQ_EN_LBN 45
-#define FRF_BZ_B2B_REQ_EN_WIDTH 1
-#define FRF_AA_B2B_REQ_EN_LBN 44
-#define FRF_AA_B2B_REQ_EN_WIDTH 1
-#define FRF_BB_FC_BLOCKING_EN_LBN 44
-#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
-#define FRF_AZ_POST_WR_MASK_LBN 40
-#define FRF_AZ_POST_WR_MASK_WIDTH 4
-#define FRF_AZ_TLP_TC_LBN 34
-#define FRF_AZ_TLP_TC_WIDTH 3
-#define FRF_AZ_TLP_ATTR_LBN 32
-#define FRF_AZ_TLP_ATTR_WIDTH 2
-#define FRF_AB_INTB_VEC_LBN 24
-#define FRF_AB_INTB_VEC_WIDTH 5
-#define FRF_AB_INTA_VEC_LBN 16
-#define FRF_AB_INTA_VEC_WIDTH 5
-#define FRF_AZ_WD_TIMER_LBN 8
-#define FRF_AZ_WD_TIMER_WIDTH 8
-#define FRF_AZ_US_DISABLE_LBN 5
-#define FRF_AZ_US_DISABLE_WIDTH 1
-#define FRF_AZ_TLP_EP_LBN 4
-#define FRF_AZ_TLP_EP_WIDTH 1
-#define FRF_AZ_ATTR_SEL_LBN 3
-#define FRF_AZ_ATTR_SEL_WIDTH 1
-#define FRF_AZ_TD_SEL_LBN 1
-#define FRF_AZ_TD_SEL_WIDTH 1
-#define FRF_AZ_TLP_TD_LBN 0
-#define FRF_AZ_TLP_TD_WIDTH 1
-
-/* EE_SPI_HCMD_REG: SPI host command register */
-#define FR_AB_EE_SPI_HCMD 0x00000100
-#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
-#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
-#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
-#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
-#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
-#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
-#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
-#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
-#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
-#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
-#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
-#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
-#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
-#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
-#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
-#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
-
-/* USR_EV_CFG: User Level Event Configuration register */
-#define FR_CZ_USR_EV_CFG 0x00000100
-#define FRF_CZ_USREV_DIS_LBN 16
-#define FRF_CZ_USREV_DIS_WIDTH 1
-#define FRF_CZ_DFLT_EVQ_LBN 0
-#define FRF_CZ_DFLT_EVQ_WIDTH 10
-
-/* EE_SPI_HADR_REG: SPI host address register */
-#define FR_AB_EE_SPI_HADR 0x00000110
-#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
-#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
-#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
-#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
-
-/* EE_SPI_HDATA_REG: SPI host data register */
-#define FR_AB_EE_SPI_HDATA 0x00000120
-#define FRF_AB_EE_SPI_HDATA3_LBN 96
-#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
-#define FRF_AB_EE_SPI_HDATA2_LBN 64
-#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
-#define FRF_AB_EE_SPI_HDATA1_LBN 32
-#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
-#define FRF_AB_EE_SPI_HDATA0_LBN 0
-#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
-
-/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
-#define FR_AB_EE_BASE_PAGE 0x00000130
-#define FRF_AB_EE_EXPROM_MASK_LBN 16
-#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
-#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
-#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
-
-/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
-#define FR_AB_EE_VPD_CFG0 0x00000140
-#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
-#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
-#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
-#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
-#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
-#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
-#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
-#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
-#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
-#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
-#define FRF_AB_EE_VPDW_LENGTH_LBN 80
-#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
-#define FRF_AB_EE_VPDW_BASE_LBN 64
-#define FRF_AB_EE_VPDW_BASE_WIDTH 15
-#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
-#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
-#define FRF_AB_EE_VPD_BASE_LBN 32
-#define FRF_AB_EE_VPD_BASE_WIDTH 24
-#define FRF_AB_EE_VPD_LENGTH_LBN 16
-#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
-#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
-#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
-#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
-#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
-#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
-#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
-#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
-#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
-#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
-#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
-#define FRF_AB_EE_VPD_EN_LBN 0
-#define FRF_AB_EE_VPD_EN_WIDTH 1
-
-/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
-#define FR_AB_EE_VPD_SW_CNTL 0x00000150
-#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
-#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
-#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
-#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
-#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
-#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
-
-/* EE_VPD_SW_DATA_REG: VPD access SW data register */
-#define FR_AB_EE_VPD_SW_DATA 0x00000160
-#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
-#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
-
-/* PBMX_DBG_IADDR_REG: Capture Module address register */
-#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
-#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
-#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
-
-/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
-#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
-#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
-#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
-#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
-#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
-#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
-#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
-
-/* PBMX_DBG_IDATA_REG: Capture Module data register */
-#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
-#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
-#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
-
-/* NIC_STAT_REG: NIC status register */
-#define FR_AB_NIC_STAT 0x00000200
-#define FRF_BB_AER_DIS_LBN 34
-#define FRF_BB_AER_DIS_WIDTH 1
-#define FRF_BB_EE_STRAP_EN_LBN 31
-#define FRF_BB_EE_STRAP_EN_WIDTH 1
-#define FRF_BB_EE_STRAP_LBN 24
-#define FRF_BB_EE_STRAP_WIDTH 4
-#define FRF_BB_REVISION_ID_LBN 17
-#define FRF_BB_REVISION_ID_WIDTH 7
-#define FRF_AB_ONCHIP_SRAM_LBN 16
-#define FRF_AB_ONCHIP_SRAM_WIDTH 1
-#define FRF_AB_SF_PRST_LBN 9
-#define FRF_AB_SF_PRST_WIDTH 1
-#define FRF_AB_EE_PRST_LBN 8
-#define FRF_AB_EE_PRST_WIDTH 1
-#define FRF_AB_ATE_MODE_LBN 3
-#define FRF_AB_ATE_MODE_WIDTH 1
-#define FRF_AB_STRAP_PINS_LBN 0
-#define FRF_AB_STRAP_PINS_WIDTH 3
-
-/* GPIO_CTL_REG: GPIO control register */
-#define FR_AB_GPIO_CTL 0x00000210
-#define FRF_AB_GPIO_OUT3_LBN 112
-#define FRF_AB_GPIO_OUT3_WIDTH 16
-#define FRF_AB_GPIO_IN3_LBN 104
-#define FRF_AB_GPIO_IN3_WIDTH 8
-#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
-#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
-#define FRF_AB_GPIO_OUT2_LBN 80
-#define FRF_AB_GPIO_OUT2_WIDTH 16
-#define FRF_AB_GPIO_IN2_LBN 72
-#define FRF_AB_GPIO_IN2_WIDTH 8
-#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
-#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
-#define FRF_AB_GPIO15_OEN_LBN 63
-#define FRF_AB_GPIO15_OEN_WIDTH 1
-#define FRF_AB_GPIO14_OEN_LBN 62
-#define FRF_AB_GPIO14_OEN_WIDTH 1
-#define FRF_AB_GPIO13_OEN_LBN 61
-#define FRF_AB_GPIO13_OEN_WIDTH 1
-#define FRF_AB_GPIO12_OEN_LBN 60
-#define FRF_AB_GPIO12_OEN_WIDTH 1
-#define FRF_AB_GPIO11_OEN_LBN 59
-#define FRF_AB_GPIO11_OEN_WIDTH 1
-#define FRF_AB_GPIO10_OEN_LBN 58
-#define FRF_AB_GPIO10_OEN_WIDTH 1
-#define FRF_AB_GPIO9_OEN_LBN 57
-#define FRF_AB_GPIO9_OEN_WIDTH 1
-#define FRF_AB_GPIO8_OEN_LBN 56
-#define FRF_AB_GPIO8_OEN_WIDTH 1
-#define FRF_AB_GPIO15_OUT_LBN 55
-#define FRF_AB_GPIO15_OUT_WIDTH 1
-#define FRF_AB_GPIO14_OUT_LBN 54
-#define FRF_AB_GPIO14_OUT_WIDTH 1
-#define FRF_AB_GPIO13_OUT_LBN 53
-#define FRF_AB_GPIO13_OUT_WIDTH 1
-#define FRF_AB_GPIO12_OUT_LBN 52
-#define FRF_AB_GPIO12_OUT_WIDTH 1
-#define FRF_AB_GPIO11_OUT_LBN 51
-#define FRF_AB_GPIO11_OUT_WIDTH 1
-#define FRF_AB_GPIO10_OUT_LBN 50
-#define FRF_AB_GPIO10_OUT_WIDTH 1
-#define FRF_AB_GPIO9_OUT_LBN 49
-#define FRF_AB_GPIO9_OUT_WIDTH 1
-#define FRF_AB_GPIO8_OUT_LBN 48
-#define FRF_AB_GPIO8_OUT_WIDTH 1
-#define FRF_AB_GPIO15_IN_LBN 47
-#define FRF_AB_GPIO15_IN_WIDTH 1
-#define FRF_AB_GPIO14_IN_LBN 46
-#define FRF_AB_GPIO14_IN_WIDTH 1
-#define FRF_AB_GPIO13_IN_LBN 45
-#define FRF_AB_GPIO13_IN_WIDTH 1
-#define FRF_AB_GPIO12_IN_LBN 44
-#define FRF_AB_GPIO12_IN_WIDTH 1
-#define FRF_AB_GPIO11_IN_LBN 43
-#define FRF_AB_GPIO11_IN_WIDTH 1
-#define FRF_AB_GPIO10_IN_LBN 42
-#define FRF_AB_GPIO10_IN_WIDTH 1
-#define FRF_AB_GPIO9_IN_LBN 41
-#define FRF_AB_GPIO9_IN_WIDTH 1
-#define FRF_AB_GPIO8_IN_LBN 40
-#define FRF_AB_GPIO8_IN_WIDTH 1
-#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
-#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
-#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
-#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
-#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
-#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
-#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
-#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
-#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_CLK156_OUT_EN_LBN 31
-#define FRF_AB_CLK156_OUT_EN_WIDTH 1
-#define FRF_AB_USE_NIC_CLK_LBN 30
-#define FRF_AB_USE_NIC_CLK_WIDTH 1
-#define FRF_AB_GPIO5_OEN_LBN 29
-#define FRF_AB_GPIO5_OEN_WIDTH 1
-#define FRF_AB_GPIO4_OEN_LBN 28
-#define FRF_AB_GPIO4_OEN_WIDTH 1
-#define FRF_AB_GPIO3_OEN_LBN 27
-#define FRF_AB_GPIO3_OEN_WIDTH 1
-#define FRF_AB_GPIO2_OEN_LBN 26
-#define FRF_AB_GPIO2_OEN_WIDTH 1
-#define FRF_AB_GPIO1_OEN_LBN 25
-#define FRF_AB_GPIO1_OEN_WIDTH 1
-#define FRF_AB_GPIO0_OEN_LBN 24
-#define FRF_AB_GPIO0_OEN_WIDTH 1
-#define FRF_AB_GPIO7_OUT_LBN 23
-#define FRF_AB_GPIO7_OUT_WIDTH 1
-#define FRF_AB_GPIO6_OUT_LBN 22
-#define FRF_AB_GPIO6_OUT_WIDTH 1
-#define FRF_AB_GPIO5_OUT_LBN 21
-#define FRF_AB_GPIO5_OUT_WIDTH 1
-#define FRF_AB_GPIO4_OUT_LBN 20
-#define FRF_AB_GPIO4_OUT_WIDTH 1
-#define FRF_AB_GPIO3_OUT_LBN 19
-#define FRF_AB_GPIO3_OUT_WIDTH 1
-#define FRF_AB_GPIO2_OUT_LBN 18
-#define FRF_AB_GPIO2_OUT_WIDTH 1
-#define FRF_AB_GPIO1_OUT_LBN 17
-#define FRF_AB_GPIO1_OUT_WIDTH 1
-#define FRF_AB_GPIO0_OUT_LBN 16
-#define FRF_AB_GPIO0_OUT_WIDTH 1
-#define FRF_AB_GPIO7_IN_LBN 15
-#define FRF_AB_GPIO7_IN_WIDTH 1
-#define FRF_AB_GPIO6_IN_LBN 14
-#define FRF_AB_GPIO6_IN_WIDTH 1
-#define FRF_AB_GPIO5_IN_LBN 13
-#define FRF_AB_GPIO5_IN_WIDTH 1
-#define FRF_AB_GPIO4_IN_LBN 12
-#define FRF_AB_GPIO4_IN_WIDTH 1
-#define FRF_AB_GPIO3_IN_LBN 11
-#define FRF_AB_GPIO3_IN_WIDTH 1
-#define FRF_AB_GPIO2_IN_LBN 10
-#define FRF_AB_GPIO2_IN_WIDTH 1
-#define FRF_AB_GPIO1_IN_LBN 9
-#define FRF_AB_GPIO1_IN_WIDTH 1
-#define FRF_AB_GPIO0_IN_LBN 8
-#define FRF_AB_GPIO0_IN_WIDTH 1
-#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
-#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
-#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
-#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
-#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
-#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
-#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
-#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
-#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
-#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
-
-/* GLB_CTL_REG: Global control register */
-#define FR_AB_GLB_CTL 0x00000220
-#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
-#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
-#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
-#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
-#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
-#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
-#define FRF_AA_PCIX_RST_CTL_LBN 60
-#define FRF_AA_PCIX_RST_CTL_WIDTH 1
-#define FRF_BB_BIU_RST_CTL_LBN 60
-#define FRF_BB_BIU_RST_CTL_WIDTH 1
-#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
-#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
-#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
-#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
-#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
-#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
-#define FRF_AB_XGRX_RST_CTL_LBN 56
-#define FRF_AB_XGRX_RST_CTL_WIDTH 1
-#define FRF_AB_XGTX_RST_CTL_LBN 55
-#define FRF_AB_XGTX_RST_CTL_WIDTH 1
-#define FRF_AB_EM_RST_CTL_LBN 54
-#define FRF_AB_EM_RST_CTL_WIDTH 1
-#define FRF_AB_EV_RST_CTL_LBN 53
-#define FRF_AB_EV_RST_CTL_WIDTH 1
-#define FRF_AB_SR_RST_CTL_LBN 52
-#define FRF_AB_SR_RST_CTL_WIDTH 1
-#define FRF_AB_RX_RST_CTL_LBN 51
-#define FRF_AB_RX_RST_CTL_WIDTH 1
-#define FRF_AB_TX_RST_CTL_LBN 50
-#define FRF_AB_TX_RST_CTL_WIDTH 1
-#define FRF_AB_EE_RST_CTL_LBN 49
-#define FRF_AB_EE_RST_CTL_WIDTH 1
-#define FRF_AB_CS_RST_CTL_LBN 48
-#define FRF_AB_CS_RST_CTL_WIDTH 1
-#define FRF_AB_HOT_RST_CTL_LBN 40
-#define FRF_AB_HOT_RST_CTL_WIDTH 2
-#define FRF_AB_RST_EXT_PHY_LBN 31
-#define FRF_AB_RST_EXT_PHY_WIDTH 1
-#define FRF_AB_RST_XAUI_SD_LBN 30
-#define FRF_AB_RST_XAUI_SD_WIDTH 1
-#define FRF_AB_RST_PCIE_SD_LBN 29
-#define FRF_AB_RST_PCIE_SD_WIDTH 1
-#define FRF_AA_RST_PCIX_LBN 28
-#define FRF_AA_RST_PCIX_WIDTH 1
-#define FRF_BB_RST_BIU_LBN 28
-#define FRF_BB_RST_BIU_WIDTH 1
-#define FRF_AB_RST_PCIE_STKY_LBN 27
-#define FRF_AB_RST_PCIE_STKY_WIDTH 1
-#define FRF_AB_RST_PCIE_NSTKY_LBN 26
-#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
-#define FRF_AB_RST_PCIE_CORE_LBN 25
-#define FRF_AB_RST_PCIE_CORE_WIDTH 1
-#define FRF_AB_RST_XGRX_LBN 24
-#define FRF_AB_RST_XGRX_WIDTH 1
-#define FRF_AB_RST_XGTX_LBN 23
-#define FRF_AB_RST_XGTX_WIDTH 1
-#define FRF_AB_RST_EM_LBN 22
-#define FRF_AB_RST_EM_WIDTH 1
-#define FRF_AB_RST_EV_LBN 21
-#define FRF_AB_RST_EV_WIDTH 1
-#define FRF_AB_RST_SR_LBN 20
-#define FRF_AB_RST_SR_WIDTH 1
-#define FRF_AB_RST_RX_LBN 19
-#define FRF_AB_RST_RX_WIDTH 1
-#define FRF_AB_RST_TX_LBN 18
-#define FRF_AB_RST_TX_WIDTH 1
-#define FRF_AB_RST_SF_LBN 17
-#define FRF_AB_RST_SF_WIDTH 1
-#define FRF_AB_RST_CS_LBN 16
-#define FRF_AB_RST_CS_WIDTH 1
-#define FRF_AB_INT_RST_DUR_LBN 4
-#define FRF_AB_INT_RST_DUR_WIDTH 3
-#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
-#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
-#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
-#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
-#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
-#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
-#define FFE_AB_EXT_PHY_RST_DUR_640US 3
-#define FFE_AB_EXT_PHY_RST_DUR_320US 2
-#define FFE_AB_EXT_PHY_RST_DUR_160US 1
-#define FFE_AB_EXT_PHY_RST_DUR_80US 0
-#define FRF_AB_SWRST_LBN 0
-#define FRF_AB_SWRST_WIDTH 1
-
-/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
-#define FR_AZ_FATAL_INTR_KER 0x00000230
-#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
-#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
-#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
-#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
-#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
-#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
-#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
-#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
-#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
-#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
-#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
-#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
-#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
-#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
-#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
-#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
-#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
-#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
-#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
-#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
-#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
-#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
-#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
-#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
-#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
-#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
-#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
-#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
-#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
-#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
-#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
-#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
-#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
-#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
-#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
-#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
-#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
-#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
-#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
-#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
-#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
-#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
-#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
-#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
-#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
-#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
-#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
-#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
-#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
-#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
-#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
-#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
-#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
-#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
-#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
-#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
-
-/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
-#define FR_BZ_FATAL_INTR_CHAR 0x00000240
-#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
-#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
-#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
-#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
-#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
-#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
-#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
-#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
-#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
-#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
-#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
-#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
-#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
-#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
-#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
-#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
-#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
-#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
-#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
-#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
-#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
-#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
-#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
-#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
-#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
-#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
-#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
-#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
-#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
-#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
-#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
-#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
-#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
-#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
-#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
-#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
-#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
-#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
-#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
-#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
-#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
-#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
-#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
-#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
-#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
-#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
-
-/* DP_CTRL_REG: Datapath control register */
-#define FR_BZ_DP_CTRL 0x00000250
-#define FRF_BZ_FLS_EVQ_ID_LBN 0
-#define FRF_BZ_FLS_EVQ_ID_WIDTH 12
-
-/* MEM_STAT_REG: Memory status register */
-#define FR_AZ_MEM_STAT 0x00000260
-#define FRF_AB_MEM_PERR_VEC_LBN 53
-#define FRF_AB_MEM_PERR_VEC_WIDTH 38
-#define FRF_AB_MBIST_CORR_LBN 38
-#define FRF_AB_MBIST_CORR_WIDTH 15
-#define FRF_AB_MBIST_ERR_LBN 0
-#define FRF_AB_MBIST_ERR_WIDTH 40
-#define FRF_CZ_MEM_PERR_VEC_LBN 0
-#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
-
-/* CS_DEBUG_REG: Debug register */
-#define FR_AZ_CS_DEBUG 0x00000270
-#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
-#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
-#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
-#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
-#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
-#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
-#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
-#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
-#define FRF_CZ_CS_PORT_NUM_LBN 40
-#define FRF_CZ_CS_PORT_NUM_WIDTH 2
-#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
-#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
-#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
-#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
-#define FRF_CZ_CS_PORT_FPE_LBN 1
-#define FRF_CZ_CS_PORT_FPE_WIDTH 35
-#define FRF_AB_EM_DEBUG_ADDR_LBN 26
-#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
-#define FRF_AB_SR_DEBUG_ADDR_LBN 21
-#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
-#define FRF_AB_EV_DEBUG_ADDR_LBN 16
-#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
-#define FRF_AB_RX_DEBUG_ADDR_LBN 11
-#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
-#define FRF_AB_TX_DEBUG_ADDR_LBN 6
-#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
-#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
-#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
-#define FRF_AZ_CS_DEBUG_EN_LBN 0
-#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
-
-/* DRIVER_REG: Driver scratch register [0-7] */
-#define FR_AZ_DRIVER 0x00000280
-#define FR_AZ_DRIVER_STEP 16
-#define FR_AZ_DRIVER_ROWS 8
-#define FRF_AZ_DRIVER_DW0_LBN 0
-#define FRF_AZ_DRIVER_DW0_WIDTH 32
-
-/* ALTERA_BUILD_REG: Altera build register */
-#define FR_AZ_ALTERA_BUILD 0x00000300
-#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
-#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
-
-/* CSR_SPARE_REG: Spare register */
-#define FR_AZ_CSR_SPARE 0x00000310
-#define FRF_AB_MEM_PERR_EN_LBN 64
-#define FRF_AB_MEM_PERR_EN_WIDTH 38
-#define FRF_CZ_MEM_PERR_EN_LBN 64
-#define FRF_CZ_MEM_PERR_EN_WIDTH 35
-#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
-#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
-#define FRF_AZ_CSR_SPARE_BITS_LBN 0
-#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
-
-/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
-#define FR_AB_PCIE_SD_CTL0123 0x00000320
-#define FRF_AB_PCIE_TESTSIG_H_LBN 96
-#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
-#define FRF_AB_PCIE_TESTSIG_L_LBN 64
-#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
-#define FRF_AB_PCIE_OFFSET_LBN 56
-#define FRF_AB_PCIE_OFFSET_WIDTH 8
-#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
-#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
-#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
-#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
-#define FRF_AB_PCIE_HIVMODE_H_LBN 53
-#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
-#define FRF_AB_PCIE_HIVMODE_L_LBN 52
-#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
-#define FRF_AB_PCIE_PARRESET_H_LBN 51
-#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
-#define FRF_AB_PCIE_PARRESET_L_LBN 50
-#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
-#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
-#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
-#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
-#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
-#define FRF_AB_PCIE_LPBK_LBN 40
-#define FRF_AB_PCIE_LPBK_WIDTH 8
-#define FRF_AB_PCIE_PARLPBK_LBN 32
-#define FRF_AB_PCIE_PARLPBK_WIDTH 8
-#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
-#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
-#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
-#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
-#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
-#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
-#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
-#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
-#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
-#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
-#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
-#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
-#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
-#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
-#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
-#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
-#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
-#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
-#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
-#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
-#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
-#define FFE_AB_PCIE_RXEQCTL_OFF 2
-#define FFE_AB_PCIE_RXEQCTL_MIN 1
-#define FFE_AB_PCIE_RXEQCTL_MAX 0
-#define FRF_AB_PCIE_HIDRV_LBN 8
-#define FRF_AB_PCIE_HIDRV_WIDTH 8
-#define FRF_AB_PCIE_LODRV_LBN 0
-#define FRF_AB_PCIE_LODRV_WIDTH 8
-
-/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
-#define FR_AB_PCIE_SD_CTL45 0x00000330
-#define FRF_AB_PCIE_DTX7_LBN 60
-#define FRF_AB_PCIE_DTX7_WIDTH 4
-#define FRF_AB_PCIE_DTX6_LBN 56
-#define FRF_AB_PCIE_DTX6_WIDTH 4
-#define FRF_AB_PCIE_DTX5_LBN 52
-#define FRF_AB_PCIE_DTX5_WIDTH 4
-#define FRF_AB_PCIE_DTX4_LBN 48
-#define FRF_AB_PCIE_DTX4_WIDTH 4
-#define FRF_AB_PCIE_DTX3_LBN 44
-#define FRF_AB_PCIE_DTX3_WIDTH 4
-#define FRF_AB_PCIE_DTX2_LBN 40
-#define FRF_AB_PCIE_DTX2_WIDTH 4
-#define FRF_AB_PCIE_DTX1_LBN 36
-#define FRF_AB_PCIE_DTX1_WIDTH 4
-#define FRF_AB_PCIE_DTX0_LBN 32
-#define FRF_AB_PCIE_DTX0_WIDTH 4
-#define FRF_AB_PCIE_DEQ7_LBN 28
-#define FRF_AB_PCIE_DEQ7_WIDTH 4
-#define FRF_AB_PCIE_DEQ6_LBN 24
-#define FRF_AB_PCIE_DEQ6_WIDTH 4
-#define FRF_AB_PCIE_DEQ5_LBN 20
-#define FRF_AB_PCIE_DEQ5_WIDTH 4
-#define FRF_AB_PCIE_DEQ4_LBN 16
-#define FRF_AB_PCIE_DEQ4_WIDTH 4
-#define FRF_AB_PCIE_DEQ3_LBN 12
-#define FRF_AB_PCIE_DEQ3_WIDTH 4
-#define FRF_AB_PCIE_DEQ2_LBN 8
-#define FRF_AB_PCIE_DEQ2_WIDTH 4
-#define FRF_AB_PCIE_DEQ1_LBN 4
-#define FRF_AB_PCIE_DEQ1_WIDTH 4
-#define FRF_AB_PCIE_DEQ0_LBN 0
-#define FRF_AB_PCIE_DEQ0_WIDTH 4
-
-/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
-#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
-#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
-#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
-#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
-#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
-#define FRF_AB_PCIE_PRBSERR_LBN 40
-#define FRF_AB_PCIE_PRBSERR_WIDTH 8
-#define FRF_AB_PCIE_PRBSERRH0_LBN 32
-#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
-#define FRF_AB_PCIE_FASTINIT_H_LBN 15
-#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
-#define FRF_AB_PCIE_FASTINIT_L_LBN 14
-#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
-#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
-#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
-#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
-#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
-#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
-#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
-#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
-#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
-#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
-#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
-#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
-#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
-#define FRF_AB_PCIE_PRBSSEL_LBN 0
-#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
-
-/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
-#define FR_BB_DEBUG_DATA_OUT 0x00000350
-#define FRF_BB_DEBUG2_PORT_LBN 25
-#define FRF_BB_DEBUG2_PORT_WIDTH 15
-#define FRF_BB_DEBUG1_PORT_LBN 0
-#define FRF_BB_DEBUG1_PORT_WIDTH 25
-
-/* EVQ_RPTR_REGP0: Event queue read pointer register */
-#define FR_BZ_EVQ_RPTR_P0 0x00000400
-#define FR_BZ_EVQ_RPTR_P0_STEP 8192
-#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
-/* EVQ_RPTR_REG_KER: Event queue read pointer register */
-#define FR_AA_EVQ_RPTR_KER 0x00011b00
-#define FR_AA_EVQ_RPTR_KER_STEP 4
-#define FR_AA_EVQ_RPTR_KER_ROWS 4
-/* EVQ_RPTR_REG: Event queue read pointer register */
-#define FR_BZ_EVQ_RPTR 0x00fa0000
-#define FR_BZ_EVQ_RPTR_STEP 16
-#define FR_BB_EVQ_RPTR_ROWS 4096
-#define FR_CZ_EVQ_RPTR_ROWS 1024
-/* EVQ_RPTR_REGP123: Event queue read pointer register */
-#define FR_BB_EVQ_RPTR_P123 0x01000400
-#define FR_BB_EVQ_RPTR_P123_STEP 8192
-#define FR_BB_EVQ_RPTR_P123_ROWS 3072
-#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
-#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
-#define FRF_AZ_EVQ_RPTR_LBN 0
-#define FRF_AZ_EVQ_RPTR_WIDTH 15
-
-/* TIMER_COMMAND_REGP0: Timer Command Registers */
-#define FR_BZ_TIMER_COMMAND_P0 0x00000420
-#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
-#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
-/* TIMER_COMMAND_REG_KER: Timer Command Registers */
-#define FR_AA_TIMER_COMMAND_KER 0x00000420
-#define FR_AA_TIMER_COMMAND_KER_STEP 8192
-#define FR_AA_TIMER_COMMAND_KER_ROWS 4
-/* TIMER_COMMAND_REGP123: Timer Command Registers */
-#define FR_BB_TIMER_COMMAND_P123 0x01000420
-#define FR_BB_TIMER_COMMAND_P123_STEP 8192
-#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
-#define FRF_CZ_TC_TIMER_MODE_LBN 14
-#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
-#define FRF_AB_TC_TIMER_MODE_LBN 12
-#define FRF_AB_TC_TIMER_MODE_WIDTH 2
-#define FRF_CZ_TC_TIMER_VAL_LBN 0
-#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
-#define FRF_AB_TC_TIMER_VAL_LBN 0
-#define FRF_AB_TC_TIMER_VAL_WIDTH 12
-
-/* DRV_EV_REG: Driver generated event register */
-#define FR_AZ_DRV_EV 0x00000440
-#define FRF_AZ_DRV_EV_QID_LBN 64
-#define FRF_AZ_DRV_EV_QID_WIDTH 12
-#define FRF_AZ_DRV_EV_DATA_LBN 0
-#define FRF_AZ_DRV_EV_DATA_WIDTH 64
-
-/* EVQ_CTL_REG: Event queue control register */
-#define FR_AZ_EVQ_CTL 0x00000450
-#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
-#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
-#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
-#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
-#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
-#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
-#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
-#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
-#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
-#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
-
-/* EVQ_CNT1_REG: Event counter 1 register */
-#define FR_AZ_EVQ_CNT1 0x00000460
-#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
-#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
-#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
-#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
-#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
-#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
-#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
-#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
-#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
-#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
-
-/* EVQ_CNT2_REG: Event counter 2 register */
-#define FR_AZ_EVQ_CNT2 0x00000470
-#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
-#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
-#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_RDY_CNT_LBN 80
-#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
-#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
-#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
-#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
-#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
-#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
-#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
-
-/* USR_EV_REG: Event mailbox register */
-#define FR_CZ_USR_EV 0x00000540
-#define FR_CZ_USR_EV_STEP 8192
-#define FR_CZ_USR_EV_ROWS 1024
-#define FRF_CZ_USR_EV_DATA_LBN 0
-#define FRF_CZ_USR_EV_DATA_WIDTH 32
-
-/* BUF_TBL_CFG_REG: Buffer table configuration register */
-#define FR_AZ_BUF_TBL_CFG 0x00000600
-#define FRF_AZ_BUF_TBL_MODE_LBN 3
-#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
-
-/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
-#define FR_AZ_SRM_RX_DC_CFG 0x00000610
-#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
-#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
-#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
-#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
-
-/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
-#define FR_AZ_SRM_TX_DC_CFG 0x00000620
-#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
-#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
-
-/* SRM_CFG_REG: SRAM configuration register */
-#define FR_AZ_SRM_CFG 0x00000630
-#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
-#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
-#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
-#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
-#define FRF_AZ_SRM_INIT_EN_LBN 3
-#define FRF_AZ_SRM_INIT_EN_WIDTH 1
-#define FRF_AZ_SRM_NUM_BANK_LBN 2
-#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
-#define FRF_AZ_SRM_BANK_SIZE_LBN 0
-#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
-
-/* BUF_TBL_UPD_REG: Buffer table update register */
-#define FR_AZ_BUF_TBL_UPD 0x00000650
-#define FRF_AZ_BUF_UPD_CMD_LBN 63
-#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
-#define FRF_AZ_BUF_CLR_CMD_LBN 62
-#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
-#define FRF_AZ_BUF_CLR_END_ID_LBN 32
-#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
-#define FRF_AZ_BUF_CLR_START_ID_LBN 0
-#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
-
-/* SRM_UPD_EVQ_REG: Buffer table update register */
-#define FR_AZ_SRM_UPD_EVQ 0x00000660
-#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
-#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
-
-/* SRAM_PARITY_REG: SRAM parity register. */
-#define FR_AZ_SRAM_PARITY 0x00000670
-#define FRF_CZ_BYPASS_ECC_LBN 3
-#define FRF_CZ_BYPASS_ECC_WIDTH 1
-#define FRF_CZ_SEC_INT_LBN 2
-#define FRF_CZ_SEC_INT_WIDTH 1
-#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
-#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
-#define FRF_AB_FORCE_SRAM_PERR_LBN 0
-#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
-#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
-#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
-
-/* RX_CFG_REG: Receive configuration register */
-#define FR_AZ_RX_CFG 0x00000800
-#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
-#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
-#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
-#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
-#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
-#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
-#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
-#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
-#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
-#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
-#define FRF_BZ_RX_TCP_SUP_LBN 48
-#define FRF_BZ_RX_TCP_SUP_WIDTH 1
-#define FRF_BZ_RX_INGR_EN_LBN 47
-#define FRF_BZ_RX_INGR_EN_WIDTH 1
-#define FRF_BZ_RX_IP_HASH_LBN 46
-#define FRF_BZ_RX_IP_HASH_WIDTH 1
-#define FRF_BZ_RX_HASH_ALG_LBN 45
-#define FRF_BZ_RX_HASH_ALG_WIDTH 1
-#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
-#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
-#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
-#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
-#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
-#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
-#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
-#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
-#define FRF_BZ_RX_OWNERR_CTL_LBN 38
-#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
-#define FRF_BZ_RX_XON_TX_TH_LBN 33
-#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
-#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
-#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
-#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
-#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
-#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
-#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
-#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
-#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
-#define FRF_AA_RX_OWNERR_CTL_LBN 30
-#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
-#define FRF_AA_RX_XON_TX_TH_LBN 25
-#define FRF_AA_RX_XON_TX_TH_WIDTH 5
-#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
-#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
-#define FRF_AA_RX_XOFF_TX_TH_LBN 20
-#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
-#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
-#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
-#define FRF_BZ_RX_XON_MAC_TH_LBN 10
-#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
-#define FRF_AA_RX_XON_MAC_TH_LBN 6
-#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
-#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
-#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
-#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
-#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
-#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
-#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
-
-/* RX_FILTER_CTL_REG: Receive filter control registers */
-#define FR_BZ_RX_FILTER_CTL 0x00000810
-#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
-#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
-#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
-#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
-#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
-#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
-#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
-#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
-#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
-#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
-#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
-#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
-#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
-#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
-#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
-#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
-#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
-#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
-#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
-#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
-#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
-#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
-#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
-#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
-#define FRF_BZ_NUM_KER_LBN 24
-#define FRF_BZ_NUM_KER_WIDTH 2
-#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
-#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
-#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
-#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
-#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
-#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
-
-/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
-#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
-#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
-#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
-#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
-#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
-
-/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
-#define FR_BZ_RX_DESC_UPD_P0 0x00000830
-#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
-#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
-/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
-#define FR_AA_RX_DESC_UPD_KER 0x00000830
-#define FR_AA_RX_DESC_UPD_KER_STEP 8192
-#define FR_AA_RX_DESC_UPD_KER_ROWS 4
-/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
-#define FR_BB_RX_DESC_UPD_P123 0x01000830
-#define FR_BB_RX_DESC_UPD_P123_STEP 8192
-#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
-#define FRF_AZ_RX_DESC_WPTR_LBN 96
-#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
-#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
-#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
-#define FRF_AZ_RX_DESC_LBN 0
-#define FRF_AZ_RX_DESC_WIDTH 64
-
-/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
-#define FR_AZ_RX_DC_CFG 0x00000840
-#define FRF_AB_RX_MAX_PF_LBN 2
-#define FRF_AB_RX_MAX_PF_WIDTH 2
-#define FRF_AZ_RX_DC_SIZE_LBN 0
-#define FRF_AZ_RX_DC_SIZE_WIDTH 2
-#define FFE_AZ_RX_DC_SIZE_64 3
-#define FFE_AZ_RX_DC_SIZE_32 2
-#define FFE_AZ_RX_DC_SIZE_16 1
-#define FFE_AZ_RX_DC_SIZE_8 0
-
-/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
-#define FR_AZ_RX_DC_PF_WM 0x00000850
-#define FRF_AZ_RX_DC_PF_HWM_LBN 6
-#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
-#define FRF_AZ_RX_DC_PF_LWM_LBN 0
-#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
-
-/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
-#define FR_BZ_RX_RSS_TKEY 0x00000860
-#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
-#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
-#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
-#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
-
-/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
-#define FR_AZ_RX_NODESC_DROP 0x00000880
-#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
-#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
-#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
-#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
-
-/* RX_SELF_RST_REG: Receive self reset register */
-#define FR_AA_RX_SELF_RST 0x00000890
-#define FRF_AA_RX_ISCSI_DIS_LBN 17
-#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
-#define FRF_AA_RX_SW_RST_REG_LBN 16
-#define FRF_AA_RX_SW_RST_REG_WIDTH 1
-#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
-#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
-#define FRF_AA_RX_SELF_RST_EN_LBN 8
-#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
-#define FRF_AA_RX_MAX_PF_LAT_LBN 4
-#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
-#define FRF_AA_RX_MAX_LU_LAT_LBN 0
-#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4
-
-/* RX_DEBUG_REG: undocumented register */
-#define FR_AZ_RX_DEBUG 0x000008a0
-#define FRF_AZ_RX_DEBUG_LBN 0
-#define FRF_AZ_RX_DEBUG_WIDTH 64
-
-/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
-#define FR_AZ_RX_PUSH_DROP 0x000008b0
-#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
-#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
-
-/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
-#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
-#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
-#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
-
-/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
-#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
-#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
-#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
-
-/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
-#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
-#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
-#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
-#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
-#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
-#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
-#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
-#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
-#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
-
-/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
-#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
-#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
-#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
-#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
-#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
-
-/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
-#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
-#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
-#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
-/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
-#define FR_AA_TX_DESC_UPD_KER 0x00000a10
-#define FR_AA_TX_DESC_UPD_KER_STEP 8192
-#define FR_AA_TX_DESC_UPD_KER_ROWS 8
-/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
-#define FR_BB_TX_DESC_UPD_P123 0x01000a10
-#define FR_BB_TX_DESC_UPD_P123_STEP 8192
-#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
-#define FRF_AZ_TX_DESC_WPTR_LBN 96
-#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
-#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
-#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
-#define FRF_AZ_TX_DESC_LBN 0
-#define FRF_AZ_TX_DESC_WIDTH 95
-
-/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
-#define FR_AZ_TX_DC_CFG 0x00000a20
-#define FRF_AZ_TX_DC_SIZE_LBN 0
-#define FRF_AZ_TX_DC_SIZE_WIDTH 2
-#define FFE_AZ_TX_DC_SIZE_32 2
-#define FFE_AZ_TX_DC_SIZE_16 1
-#define FFE_AZ_TX_DC_SIZE_8 0
-
-/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
-#define FR_AA_TX_CHKSM_CFG 0x00000a30
-#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
-#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
-#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
-#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
-#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
-#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
-#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
-#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
-
-/* TX_CFG_REG: Transmit configuration register */
-#define FR_AZ_TX_CFG 0x00000a50
-#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
-#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
-#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
-#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
-#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
-#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
-#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
-#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
-#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
-#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
-#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
-#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
-#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
-#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
-#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
-#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
-#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
-#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
-#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
-#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
-#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
-#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
-#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
-#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
-#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
-#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
-#define FRF_AZ_TX_P1_PRI_EN_LBN 4
-#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
-#define FRF_AZ_TX_OWNERR_CTL_LBN 2
-#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
-#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
-#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
-#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
-#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
-
-/* TX_PUSH_DROP_REG: Transmit push dropped register */
-#define FR_AZ_TX_PUSH_DROP 0x00000a60
-#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
-#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
-
-/* TX_RESERVED_REG: Transmit configuration register */
-#define FR_AZ_TX_RESERVED 0x00000a80
-#define FRF_AZ_TX_EVT_CNT_LBN 121
-#define FRF_AZ_TX_EVT_CNT_WIDTH 7
-#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
-#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
-#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
-#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
-#define FRF_AZ_TX_PUSH_EN_LBN 89
-#define FRF_AZ_TX_PUSH_EN_WIDTH 1
-#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
-#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
-#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
-#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
-#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
-#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
-#define FRF_AZ_TX_DMAQ_ST_LBN 78
-#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
-#define FRF_AZ_TX_RX_SPACER_LBN 64
-#define FRF_AZ_TX_RX_SPACER_WIDTH 8
-#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
-#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
-#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
-#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
-#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
-#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
-#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
-#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
-#define FRF_AZ_TX_XP_TIMER_LBN 52
-#define FRF_AZ_TX_XP_TIMER_WIDTH 5
-#define FRF_AZ_TX_PREF_SPACER_LBN 44
-#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
-#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
-#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
-#define FRF_AZ_TX_ONLY1TAG_LBN 21
-#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
-#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
-#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
-#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
-#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
-#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
-#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
-#define FRF_AA_TX_DMA_FF_THR_LBN 16
-#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
-#define FRF_AZ_TX_DMA_SPACER_LBN 8
-#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
-#define FRF_AA_TX_TCP_DIS_LBN 7
-#define FRF_AA_TX_TCP_DIS_WIDTH 1
-#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
-#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
-#define FRF_AA_TX_IP_DIS_LBN 6
-#define FRF_AA_TX_IP_DIS_WIDTH 1
-#define FRF_AZ_TX_MAX_CPL_LBN 2
-#define FRF_AZ_TX_MAX_CPL_WIDTH 2
-#define FFE_AZ_TX_MAX_CPL_16 3
-#define FFE_AZ_TX_MAX_CPL_8 2
-#define FFE_AZ_TX_MAX_CPL_4 1
-#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
-#define FRF_AZ_TX_MAX_PREF_LBN 0
-#define FRF_AZ_TX_MAX_PREF_WIDTH 2
-#define FFE_AZ_TX_MAX_PREF_32 3
-#define FFE_AZ_TX_MAX_PREF_16 2
-#define FFE_AZ_TX_MAX_PREF_8 1
-#define FFE_AZ_TX_MAX_PREF_OFF 0
-
-/* TX_PACE_REG: Transmit pace control register */
-#define FR_BZ_TX_PACE 0x00000a90
-#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
-#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
-#define FRF_BZ_TX_PACE_SB_AF_LBN 9
-#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
-#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
-#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
-#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
-#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
-
-/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
-#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
-#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
-#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
-
-/* TX_VLAN_REG: Transmit VLAN tag register */
-#define FR_BB_TX_VLAN 0x00000ae0
-#define FRF_BB_TX_VLAN_EN_LBN 127
-#define FRF_BB_TX_VLAN_EN_WIDTH 1
-#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
-#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
-#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN7_LBN 112
-#define FRF_BB_TX_VLAN7_WIDTH 12
-#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
-#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
-#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN6_LBN 96
-#define FRF_BB_TX_VLAN6_WIDTH 12
-#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
-#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
-#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN5_LBN 80
-#define FRF_BB_TX_VLAN5_WIDTH 12
-#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
-#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
-#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN4_LBN 64
-#define FRF_BB_TX_VLAN4_WIDTH 12
-#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
-#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
-#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN3_LBN 48
-#define FRF_BB_TX_VLAN3_WIDTH 12
-#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
-#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
-#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN2_LBN 32
-#define FRF_BB_TX_VLAN2_WIDTH 12
-#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
-#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
-#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN1_LBN 16
-#define FRF_BB_TX_VLAN1_WIDTH 12
-#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
-#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
-#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
-#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
-#define FRF_BB_TX_VLAN0_LBN 0
-#define FRF_BB_TX_VLAN0_WIDTH 12
-
-/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
-#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
-#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
-#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
-#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
-#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
-#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
-#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
-#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
-#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
-#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
-#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
-#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
-#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
-#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
-#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
-#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
-#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
-#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
-#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
-#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
-#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
-#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
-#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
-#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
-#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
-#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
-#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
-#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
-#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
-#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
-#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
-#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
-#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
-#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
-#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
-#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
-
-/* TX_IPFIL_TBL: Transmit IP source address filter table */
-#define FR_BB_TX_IPFIL_TBL 0x00000b00
-#define FR_BB_TX_IPFIL_TBL_STEP 16
-#define FR_BB_TX_IPFIL_TBL_ROWS 16
-#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
-#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
-#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
-#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
-#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
-#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
-#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
-#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
-
-/* MD_TXD_REG: PHY management transmit data register */
-#define FR_AB_MD_TXD 0x00000c00
-#define FRF_AB_MD_TXD_LBN 0
-#define FRF_AB_MD_TXD_WIDTH 16
-
-/* MD_RXD_REG: PHY management receive data register */
-#define FR_AB_MD_RXD 0x00000c10
-#define FRF_AB_MD_RXD_LBN 0
-#define FRF_AB_MD_RXD_WIDTH 16
-
-/* MD_CS_REG: PHY management configuration & status register */
-#define FR_AB_MD_CS 0x00000c20
-#define FRF_AB_MD_RD_EN_CMD_LBN 15
-#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
-#define FRF_AB_MD_WR_EN_CMD_LBN 14
-#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
-#define FRF_AB_MD_ADDR_CMD_LBN 13
-#define FRF_AB_MD_ADDR_CMD_WIDTH 1
-#define FRF_AB_MD_PT_LBN 7
-#define FRF_AB_MD_PT_WIDTH 3
-#define FRF_AB_MD_PL_LBN 6
-#define FRF_AB_MD_PL_WIDTH 1
-#define FRF_AB_MD_INT_CLR_LBN 5
-#define FRF_AB_MD_INT_CLR_WIDTH 1
-#define FRF_AB_MD_GC_LBN 4
-#define FRF_AB_MD_GC_WIDTH 1
-#define FRF_AB_MD_PRSP_LBN 3
-#define FRF_AB_MD_PRSP_WIDTH 1
-#define FRF_AB_MD_RIC_LBN 2
-#define FRF_AB_MD_RIC_WIDTH 1
-#define FRF_AB_MD_RDC_LBN 1
-#define FRF_AB_MD_RDC_WIDTH 1
-#define FRF_AB_MD_WRC_LBN 0
-#define FRF_AB_MD_WRC_WIDTH 1
-
-/* MD_PHY_ADR_REG: PHY management PHY address register */
-#define FR_AB_MD_PHY_ADR 0x00000c30
-#define FRF_AB_MD_PHY_ADR_LBN 0
-#define FRF_AB_MD_PHY_ADR_WIDTH 16
-
-/* MD_ID_REG: PHY management ID register */
-#define FR_AB_MD_ID 0x00000c40
-#define FRF_AB_MD_PRT_ADR_LBN 11
-#define FRF_AB_MD_PRT_ADR_WIDTH 5
-#define FRF_AB_MD_DEV_ADR_LBN 6
-#define FRF_AB_MD_DEV_ADR_WIDTH 5
-
-/* MD_STAT_REG: PHY management status & mask register */
-#define FR_AB_MD_STAT 0x00000c50
-#define FRF_AB_MD_PINT_LBN 4
-#define FRF_AB_MD_PINT_WIDTH 1
-#define FRF_AB_MD_DONE_LBN 3
-#define FRF_AB_MD_DONE_WIDTH 1
-#define FRF_AB_MD_BSERR_LBN 2
-#define FRF_AB_MD_BSERR_WIDTH 1
-#define FRF_AB_MD_LNFL_LBN 1
-#define FRF_AB_MD_LNFL_WIDTH 1
-#define FRF_AB_MD_BSY_LBN 0
-#define FRF_AB_MD_BSY_WIDTH 1
-
-/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
-#define FR_AB_MAC_STAT_DMA 0x00000c60
-#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
-#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
-#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
-#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
-
-/* MAC_CTRL_REG: Port MAC control register */
-#define FR_AB_MAC_CTRL 0x00000c80
-#define FRF_AB_MAC_XOFF_VAL_LBN 16
-#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
-#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
-#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
-#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
-#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
-#define FRF_AB_MAC_BCAD_ACPT_LBN 4
-#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
-#define FRF_AB_MAC_UC_PROM_LBN 3
-#define FRF_AB_MAC_UC_PROM_WIDTH 1
-#define FRF_AB_MAC_LINK_STATUS_LBN 2
-#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
-#define FRF_AB_MAC_SPEED_LBN 0
-#define FRF_AB_MAC_SPEED_WIDTH 2
-#define FFE_AB_MAC_SPEED_10G 3
-#define FFE_AB_MAC_SPEED_1G 2
-#define FFE_AB_MAC_SPEED_100M 1
-#define FFE_AB_MAC_SPEED_10M 0
-
-/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
-#define FR_BB_GEN_MODE 0x00000c90
-#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
-#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
-#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
-#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
-#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
-#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
-#define FRF_BB_XG_PHY_INT_MASK_LBN 0
-#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
-
-/* MAC_MC_HASH_REG0: Multicast address hash table */
-#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
-#define FRF_AB_MAC_MCAST_HASH0_LBN 0
-#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
-
-/* MAC_MC_HASH_REG1: Multicast address hash table */
-#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
-#define FRF_AB_MAC_MCAST_HASH1_LBN 0
-#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
-
-/* GM_CFG1_REG: GMAC configuration register 1 */
-#define FR_AB_GM_CFG1 0x00000e00
-#define FRF_AB_GM_SW_RST_LBN 31
-#define FRF_AB_GM_SW_RST_WIDTH 1
-#define FRF_AB_GM_SIM_RST_LBN 30
-#define FRF_AB_GM_SIM_RST_WIDTH 1
-#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
-#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
-#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
-#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
-#define FRF_AB_GM_RST_RX_FUNC_LBN 17
-#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
-#define FRF_AB_GM_RST_TX_FUNC_LBN 16
-#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
-#define FRF_AB_GM_LOOP_LBN 8
-#define FRF_AB_GM_LOOP_WIDTH 1
-#define FRF_AB_GM_RX_FC_EN_LBN 5
-#define FRF_AB_GM_RX_FC_EN_WIDTH 1
-#define FRF_AB_GM_TX_FC_EN_LBN 4
-#define FRF_AB_GM_TX_FC_EN_WIDTH 1
-#define FRF_AB_GM_SYNC_RXEN_LBN 3
-#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
-#define FRF_AB_GM_RX_EN_LBN 2
-#define FRF_AB_GM_RX_EN_WIDTH 1
-#define FRF_AB_GM_SYNC_TXEN_LBN 1
-#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
-#define FRF_AB_GM_TX_EN_LBN 0
-#define FRF_AB_GM_TX_EN_WIDTH 1
-
-/* GM_CFG2_REG: GMAC configuration register 2 */
-#define FR_AB_GM_CFG2 0x00000e10
-#define FRF_AB_GM_PAMBL_LEN_LBN 12
-#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
-#define FRF_AB_GM_IF_MODE_LBN 8
-#define FRF_AB_GM_IF_MODE_WIDTH 2
-#define FFE_AB_IF_MODE_BYTE_MODE 2
-#define FFE_AB_IF_MODE_NIBBLE_MODE 1
-#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
-#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
-#define FRF_AB_GM_LEN_CHK_LBN 4
-#define FRF_AB_GM_LEN_CHK_WIDTH 1
-#define FRF_AB_GM_PAD_CRC_EN_LBN 2
-#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
-#define FRF_AB_GM_CRC_EN_LBN 1
-#define FRF_AB_GM_CRC_EN_WIDTH 1
-#define FRF_AB_GM_FD_LBN 0
-#define FRF_AB_GM_FD_WIDTH 1
-
-/* GM_IPG_REG: GMAC IPG register */
-#define FR_AB_GM_IPG 0x00000e20
-#define FRF_AB_GM_NONB2B_IPG1_LBN 24
-#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
-#define FRF_AB_GM_NONB2B_IPG2_LBN 16
-#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
-#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
-#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
-#define FRF_AB_GM_B2B_IPG_LBN 0
-#define FRF_AB_GM_B2B_IPG_WIDTH 7
-
-/* GM_HD_REG: GMAC half duplex register */
-#define FR_AB_GM_HD 0x00000e30
-#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
-#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
-#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
-#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
-#define FRF_AB_GM_BP_NO_BOFF_LBN 18
-#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
-#define FRF_AB_GM_DIS_BOFF_LBN 17
-#define FRF_AB_GM_DIS_BOFF_WIDTH 1
-#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
-#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
-#define FRF_AB_GM_RTRY_LIMIT_LBN 12
-#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
-#define FRF_AB_GM_COL_WIN_LBN 0
-#define FRF_AB_GM_COL_WIN_WIDTH 10
-
-/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
-#define FR_AB_GM_MAX_FLEN 0x00000e40
-#define FRF_AB_GM_MAX_FLEN_LBN 0
-#define FRF_AB_GM_MAX_FLEN_WIDTH 16
-
-/* GM_TEST_REG: GMAC test register */
-#define FR_AB_GM_TEST 0x00000e70
-#define FRF_AB_GM_MAX_BOFF_LBN 3
-#define FRF_AB_GM_MAX_BOFF_WIDTH 1
-#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
-#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
-#define FRF_AB_GM_TEST_PAUSE_LBN 1
-#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
-#define FRF_AB_GM_SHORT_SLOT_LBN 0
-#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
-
-/* GM_ADR1_REG: GMAC station address register 1 */
-#define FR_AB_GM_ADR1 0x00000f00
-#define FRF_AB_GM_ADR_B0_LBN 24
-#define FRF_AB_GM_ADR_B0_WIDTH 8
-#define FRF_AB_GM_ADR_B1_LBN 16
-#define FRF_AB_GM_ADR_B1_WIDTH 8
-#define FRF_AB_GM_ADR_B2_LBN 8
-#define FRF_AB_GM_ADR_B2_WIDTH 8
-#define FRF_AB_GM_ADR_B3_LBN 0
-#define FRF_AB_GM_ADR_B3_WIDTH 8
-
-/* GM_ADR2_REG: GMAC station address register 2 */
-#define FR_AB_GM_ADR2 0x00000f10
-#define FRF_AB_GM_ADR_B4_LBN 24
-#define FRF_AB_GM_ADR_B4_WIDTH 8
-#define FRF_AB_GM_ADR_B5_LBN 16
-#define FRF_AB_GM_ADR_B5_WIDTH 8
-
-/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
-#define FR_AB_GMF_CFG0 0x00000f20
-#define FRF_AB_GMF_FTFENRPLY_LBN 20
-#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
-#define FRF_AB_GMF_STFENRPLY_LBN 19
-#define FRF_AB_GMF_STFENRPLY_WIDTH 1
-#define FRF_AB_GMF_FRFENRPLY_LBN 18
-#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
-#define FRF_AB_GMF_SRFENRPLY_LBN 17
-#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
-#define FRF_AB_GMF_WTMENRPLY_LBN 16
-#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
-#define FRF_AB_GMF_FTFENREQ_LBN 12
-#define FRF_AB_GMF_FTFENREQ_WIDTH 1
-#define FRF_AB_GMF_STFENREQ_LBN 11
-#define FRF_AB_GMF_STFENREQ_WIDTH 1
-#define FRF_AB_GMF_FRFENREQ_LBN 10
-#define FRF_AB_GMF_FRFENREQ_WIDTH 1
-#define FRF_AB_GMF_SRFENREQ_LBN 9
-#define FRF_AB_GMF_SRFENREQ_WIDTH 1
-#define FRF_AB_GMF_WTMENREQ_LBN 8
-#define FRF_AB_GMF_WTMENREQ_WIDTH 1
-#define FRF_AB_GMF_HSTRSTFT_LBN 4
-#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
-#define FRF_AB_GMF_HSTRSTST_LBN 3
-#define FRF_AB_GMF_HSTRSTST_WIDTH 1
-#define FRF_AB_GMF_HSTRSTFR_LBN 2
-#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
-#define FRF_AB_GMF_HSTRSTSR_LBN 1
-#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
-#define FRF_AB_GMF_HSTRSTWT_LBN 0
-#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
-
-/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
-#define FR_AB_GMF_CFG1 0x00000f30
-#define FRF_AB_GMF_CFGFRTH_LBN 16
-#define FRF_AB_GMF_CFGFRTH_WIDTH 5
-#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
-#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
-
-/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
-#define FR_AB_GMF_CFG2 0x00000f40
-#define FRF_AB_GMF_CFGHWM_LBN 16
-#define FRF_AB_GMF_CFGHWM_WIDTH 6
-#define FRF_AB_GMF_CFGLWM_LBN 0
-#define FRF_AB_GMF_CFGLWM_WIDTH 6
-
-/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
-#define FR_AB_GMF_CFG3 0x00000f50
-#define FRF_AB_GMF_CFGHWMFT_LBN 16
-#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
-#define FRF_AB_GMF_CFGFTTH_LBN 0
-#define FRF_AB_GMF_CFGFTTH_WIDTH 6
-
-/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
-#define FR_AB_GMF_CFG4 0x00000f60
-#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
-#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
-
-/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
-#define FR_AB_GMF_CFG5 0x00000f70
-#define FRF_AB_GMF_CFGHDPLX_LBN 22
-#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
-#define FRF_AB_GMF_SRFULL_LBN 21
-#define FRF_AB_GMF_SRFULL_WIDTH 1
-#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
-#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
-#define FRF_AB_GMF_CFGBYTMODE_LBN 19
-#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
-#define FRF_AB_GMF_HSTDRPLT64_LBN 18
-#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
-#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
-#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
-
-/* TX_SRC_MAC_TBL: Transmit MAC source address filter table */
-#define FR_BB_TX_SRC_MAC_TBL 0x00001000
-#define FR_BB_TX_SRC_MAC_TBL_STEP 16
-#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
-#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
-#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
-#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
-#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
-
-/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
-#define FR_BB_TX_SRC_MAC_CTL 0x00001100
-#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
-#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
-#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
-#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
-#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
-#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
-#define FRF_BB_TX_MAC_QID_SEL_LBN 0
-#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
-
-/* XM_ADR_LO_REG: XGMAC address register low */
-#define FR_AB_XM_ADR_LO 0x00001200
-#define FRF_AB_XM_ADR_LO_LBN 0
-#define FRF_AB_XM_ADR_LO_WIDTH 32
-
-/* XM_ADR_HI_REG: XGMAC address register high */
-#define FR_AB_XM_ADR_HI 0x00001210
-#define FRF_AB_XM_ADR_HI_LBN 0
-#define FRF_AB_XM_ADR_HI_WIDTH 16
-
-/* XM_GLB_CFG_REG: XGMAC global configuration */
-#define FR_AB_XM_GLB_CFG 0x00001220
-#define FRF_AB_XM_RMTFLT_GEN_LBN 17
-#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
-#define FRF_AB_XM_DEBUG_MODE_LBN 16
-#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
-#define FRF_AB_XM_RX_STAT_EN_LBN 11
-#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
-#define FRF_AB_XM_TX_STAT_EN_LBN 10
-#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
-#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
-#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
-#define FRF_AB_XM_WAN_MODE_LBN 5
-#define FRF_AB_XM_WAN_MODE_WIDTH 1
-#define FRF_AB_XM_INTCLR_MODE_LBN 3
-#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
-#define FRF_AB_XM_CORE_RST_LBN 0
-#define FRF_AB_XM_CORE_RST_WIDTH 1
-
-/* XM_TX_CFG_REG: XGMAC transmit configuration */
-#define FR_AB_XM_TX_CFG 0x00001230
-#define FRF_AB_XM_TX_PROG_LBN 24
-#define FRF_AB_XM_TX_PROG_WIDTH 1
-#define FRF_AB_XM_IPG_LBN 16
-#define FRF_AB_XM_IPG_WIDTH 4
-#define FRF_AB_XM_FCNTL_LBN 10
-#define FRF_AB_XM_FCNTL_WIDTH 1
-#define FRF_AB_XM_TXCRC_LBN 8
-#define FRF_AB_XM_TXCRC_WIDTH 1
-#define FRF_AB_XM_EDRC_LBN 6
-#define FRF_AB_XM_EDRC_WIDTH 1
-#define FRF_AB_XM_AUTO_PAD_LBN 5
-#define FRF_AB_XM_AUTO_PAD_WIDTH 1
-#define FRF_AB_XM_TX_PRMBL_LBN 2
-#define FRF_AB_XM_TX_PRMBL_WIDTH 1
-#define FRF_AB_XM_TXEN_LBN 1
-#define FRF_AB_XM_TXEN_WIDTH 1
-#define FRF_AB_XM_TX_RST_LBN 0
-#define FRF_AB_XM_TX_RST_WIDTH 1
-
-/* XM_RX_CFG_REG: XGMAC receive configuration */
-#define FR_AB_XM_RX_CFG 0x00001240
-#define FRF_AB_XM_PASS_LENERR_LBN 26
-#define FRF_AB_XM_PASS_LENERR_WIDTH 1
-#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
-#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
-#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
-#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
-#define FRF_AB_XM_REJ_BCAST_LBN 20
-#define FRF_AB_XM_REJ_BCAST_WIDTH 1
-#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
-#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
-#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
-#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
-#define FRF_AB_XM_AUTO_DEPAD_LBN 8
-#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
-#define FRF_AB_XM_RXCRC_LBN 3
-#define FRF_AB_XM_RXCRC_WIDTH 1
-#define FRF_AB_XM_RX_PRMBL_LBN 2
-#define FRF_AB_XM_RX_PRMBL_WIDTH 1
-#define FRF_AB_XM_RXEN_LBN 1
-#define FRF_AB_XM_RXEN_WIDTH 1
-#define FRF_AB_XM_RX_RST_LBN 0
-#define FRF_AB_XM_RX_RST_WIDTH 1
-
-/* XM_MGT_INT_MASK: XGMAC management interrupt mask register */
-#define FR_AB_XM_MGT_INT_MASK 0x00001250
-#define FRF_AB_XM_MSK_STA_INTR_LBN 16
-#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
-#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
-#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
-#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
-#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
-#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
-#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
-#define FRF_AB_XM_MSK_RMTFLT_LBN 1
-#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
-#define FRF_AB_XM_MSK_LCLFLT_LBN 0
-#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
-
-/* XM_FC_REG: XGMAC flow control register */
-#define FR_AB_XM_FC 0x00001270
-#define FRF_AB_XM_PAUSE_TIME_LBN 16
-#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
-#define FRF_AB_XM_RX_MAC_STAT_LBN 11
-#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
-#define FRF_AB_XM_TX_MAC_STAT_LBN 10
-#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
-#define FRF_AB_XM_MCNTL_PASS_LBN 8
-#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
-#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
-#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
-#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
-#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
-#define FRF_AB_XM_ZPAUSE_LBN 2
-#define FRF_AB_XM_ZPAUSE_WIDTH 1
-#define FRF_AB_XM_XMIT_PAUSE_LBN 1
-#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
-#define FRF_AB_XM_DIS_FCNTL_LBN 0
-#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
-
-/* XM_PAUSE_TIME_REG: XGMAC pause time register */
-#define FR_AB_XM_PAUSE_TIME 0x00001290
-#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
-#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
-#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
-#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
-
-/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
-#define FR_AB_XM_TX_PARAM 0x000012d0
-#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
-#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
-#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
-#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
-#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
-#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
-#define FRF_AB_XM_PAD_CHAR_LBN 0
-#define FRF_AB_XM_PAD_CHAR_WIDTH 8
-
-/* XM_RX_PARAM_REG: XGMAC receive parameter register */
-#define FR_AB_XM_RX_PARAM 0x000012e0
-#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
-#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
-#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
-#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
-
-/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
-#define FR_AB_XM_MGT_INT_MSK 0x000012f0
-#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
-#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
-#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
-#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
-#define FRF_AB_XM_PRMBLE_ERR_LBN 2
-#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
-#define FRF_AB_XM_RMTFLT_LBN 1
-#define FRF_AB_XM_RMTFLT_WIDTH 1
-#define FRF_AB_XM_LCLFLT_LBN 0
-#define FRF_AB_XM_LCLFLT_WIDTH 1
-
-/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
-#define FR_AB_XX_PWR_RST 0x00001300
-#define FRF_AB_XX_PWRDND_SIG_LBN 31
-#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
-#define FRF_AB_XX_PWRDNC_SIG_LBN 30
-#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
-#define FRF_AB_XX_PWRDNB_SIG_LBN 29
-#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
-#define FRF_AB_XX_PWRDNA_SIG_LBN 28
-#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
-#define FRF_AB_XX_SIM_MODE_LBN 27
-#define FRF_AB_XX_SIM_MODE_WIDTH 1
-#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
-#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
-#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
-#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
-#define FRF_AB_XX_RESETD_SIG_LBN 23
-#define FRF_AB_XX_RESETD_SIG_WIDTH 1
-#define FRF_AB_XX_RESETC_SIG_LBN 22
-#define FRF_AB_XX_RESETC_SIG_WIDTH 1
-#define FRF_AB_XX_RESETB_SIG_LBN 21
-#define FRF_AB_XX_RESETB_SIG_WIDTH 1
-#define FRF_AB_XX_RESETA_SIG_LBN 20
-#define FRF_AB_XX_RESETA_SIG_WIDTH 1
-#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
-#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
-#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
-#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
-#define FRF_AB_XX_SD_RST_ACT_LBN 16
-#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
-#define FRF_AB_XX_PWRDND_EN_LBN 15
-#define FRF_AB_XX_PWRDND_EN_WIDTH 1
-#define FRF_AB_XX_PWRDNC_EN_LBN 14
-#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
-#define FRF_AB_XX_PWRDNB_EN_LBN 13
-#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
-#define FRF_AB_XX_PWRDNA_EN_LBN 12
-#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
-#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
-#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
-#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
-#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
-#define FRF_AB_XX_RESETD_EN_LBN 7
-#define FRF_AB_XX_RESETD_EN_WIDTH 1
-#define FRF_AB_XX_RESETC_EN_LBN 6
-#define FRF_AB_XX_RESETC_EN_WIDTH 1
-#define FRF_AB_XX_RESETB_EN_LBN 5
-#define FRF_AB_XX_RESETB_EN_WIDTH 1
-#define FRF_AB_XX_RESETA_EN_LBN 4
-#define FRF_AB_XX_RESETA_EN_WIDTH 1
-#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
-#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
-#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
-#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
-#define FRF_AB_XX_RST_XX_EN_LBN 0
-#define FRF_AB_XX_RST_XX_EN_WIDTH 1
-
-/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
-#define FR_AB_XX_SD_CTL 0x00001310
-#define FRF_AB_XX_TERMADJ1_LBN 17
-#define FRF_AB_XX_TERMADJ1_WIDTH 1
-#define FRF_AB_XX_TERMADJ0_LBN 16
-#define FRF_AB_XX_TERMADJ0_WIDTH 1
-#define FRF_AB_XX_HIDRVD_LBN 15
-#define FRF_AB_XX_HIDRVD_WIDTH 1
-#define FRF_AB_XX_LODRVD_LBN 14
-#define FRF_AB_XX_LODRVD_WIDTH 1
-#define FRF_AB_XX_HIDRVC_LBN 13
-#define FRF_AB_XX_HIDRVC_WIDTH 1
-#define FRF_AB_XX_LODRVC_LBN 12
-#define FRF_AB_XX_LODRVC_WIDTH 1
-#define FRF_AB_XX_HIDRVB_LBN 11
-#define FRF_AB_XX_HIDRVB_WIDTH 1
-#define FRF_AB_XX_LODRVB_LBN 10
-#define FRF_AB_XX_LODRVB_WIDTH 1
-#define FRF_AB_XX_HIDRVA_LBN 9
-#define FRF_AB_XX_HIDRVA_WIDTH 1
-#define FRF_AB_XX_LODRVA_LBN 8
-#define FRF_AB_XX_LODRVA_WIDTH 1
-#define FRF_AB_XX_LPBKD_LBN 3
-#define FRF_AB_XX_LPBKD_WIDTH 1
-#define FRF_AB_XX_LPBKC_LBN 2
-#define FRF_AB_XX_LPBKC_WIDTH 1
-#define FRF_AB_XX_LPBKB_LBN 1
-#define FRF_AB_XX_LPBKB_WIDTH 1
-#define FRF_AB_XX_LPBKA_LBN 0
-#define FRF_AB_XX_LPBKA_WIDTH 1
-
-/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
-#define FR_AB_XX_TXDRV_CTL 0x00001320
-#define FRF_AB_XX_DEQD_LBN 28
-#define FRF_AB_XX_DEQD_WIDTH 4
-#define FRF_AB_XX_DEQC_LBN 24
-#define FRF_AB_XX_DEQC_WIDTH 4
-#define FRF_AB_XX_DEQB_LBN 20
-#define FRF_AB_XX_DEQB_WIDTH 4
-#define FRF_AB_XX_DEQA_LBN 16
-#define FRF_AB_XX_DEQA_WIDTH 4
-#define FRF_AB_XX_DTXD_LBN 12
-#define FRF_AB_XX_DTXD_WIDTH 4
-#define FRF_AB_XX_DTXC_LBN 8
-#define FRF_AB_XX_DTXC_WIDTH 4
-#define FRF_AB_XX_DTXB_LBN 4
-#define FRF_AB_XX_DTXB_WIDTH 4
-#define FRF_AB_XX_DTXA_LBN 0
-#define FRF_AB_XX_DTXA_WIDTH 4
-
-/* XX_PRBS_CTL_REG: XAUI SerDes PRBS control register */
-#define FR_AB_XX_PRBS_CTL 0x00001330
-#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
-#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
-#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
-#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
-#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
-#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
-#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
-#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
-#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
-#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
-#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
-#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
-#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
-#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
-#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
-#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
-#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
-#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
-#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
-#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
-#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
-#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
-#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
-#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
-#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
-#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
-#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
-#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
-#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
-#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
-#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
-#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
-#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
-#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
-
-/* XX_PRBS_CHK_REG: XAUI SerDes PRBS checker status register */
-#define FR_AB_XX_PRBS_CHK 0x00001340
-#define FRF_AB_XX_REV_LB_EN_LBN 16
-#define FRF_AB_XX_REV_LB_EN_WIDTH 1
-#define FRF_AB_XX_CH3_DEG_DET_LBN 15
-#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
-#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
-#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
-#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
-#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
-#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
-#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
-#define FRF_AB_XX_CH2_DEG_DET_LBN 11
-#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
-#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
-#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
-#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
-#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
-#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
-#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
-#define FRF_AB_XX_CH1_DEG_DET_LBN 7
-#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
-#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
-#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
-#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
-#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
-#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
-#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
-#define FRF_AB_XX_CH0_DEG_DET_LBN 3
-#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
-#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
-#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
-#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
-#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
-#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
-#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
-
-/* XX_PRBS_ERR_REG: XAUI SerDes PRBS error count register */
-#define FR_AB_XX_PRBS_ERR 0x00001350
-#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
-#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
-#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
-#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
-#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
-#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
-#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
-#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
-
-/* XX_CORE_STAT_REG: XAUI XGXS core status register */
-#define FR_AB_XX_CORE_STAT 0x00001360
-#define FRF_AB_XX_FORCE_SIG3_LBN 31
-#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
-#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
-#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
-#define FRF_AB_XX_FORCE_SIG2_LBN 29
-#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
-#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
-#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
-#define FRF_AB_XX_FORCE_SIG1_LBN 27
-#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
-#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
-#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
-#define FRF_AB_XX_FORCE_SIG0_LBN 25
-#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
-#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
-#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
-#define FRF_AB_XX_XGXS_LB_EN_LBN 23
-#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
-#define FRF_AB_XX_XGMII_LB_EN_LBN 22
-#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
-#define FRF_AB_XX_MATCH_FAULT_LBN 21
-#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
-#define FRF_AB_XX_ALIGN_DONE_LBN 20
-#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
-#define FRF_AB_XX_SYNC_STAT3_LBN 19
-#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
-#define FRF_AB_XX_SYNC_STAT2_LBN 18
-#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
-#define FRF_AB_XX_SYNC_STAT1_LBN 17
-#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
-#define FRF_AB_XX_SYNC_STAT0_LBN 16
-#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
-#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
-#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
-#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
-#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
-#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
-#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
-#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
-#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
-#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
-#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
-#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
-#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
-#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
-#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
-#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
-#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
-#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
-#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
-#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
-#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
-#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
-#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
-#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
-#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
-#define FRF_AB_XX_DISPERR_CH3_LBN 3
-#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
-#define FRF_AB_XX_DISPERR_CH2_LBN 2
-#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
-#define FRF_AB_XX_DISPERR_CH1_LBN 1
-#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
-#define FRF_AB_XX_DISPERR_CH0_LBN 0
-#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
-
-/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
-#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
-#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
-#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
-/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
-#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
-#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
-#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
-#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
-#define FRF_CZ_RX_HDR_SPLIT_LBN 90
-#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
-#define FRF_AA_RX_RESET_LBN 89
-#define FRF_AA_RX_RESET_WIDTH 1
-#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
-#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
-#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
-#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
-#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
-#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
-#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
-#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
-#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
-#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
-#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
-#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
-#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
-#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
-#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
-#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
-#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
-#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
-#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
-#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
-#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
-#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
-#define FFE_AZ_RX_DESCQ_SIZE_4K 3
-#define FFE_AZ_RX_DESCQ_SIZE_2K 2
-#define FFE_AZ_RX_DESCQ_SIZE_1K 1
-#define FFE_AZ_RX_DESCQ_SIZE_512 0
-#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
-#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
-#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
-#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
-#define FRF_AZ_RX_DESCQ_EN_LBN 0
-#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
-
-/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer table */
-#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
-#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
-#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
-/* TX_DESC_PTR_TBL: Transmit descriptor pointer table */
-#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
-#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
-#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
-#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
-#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
-#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
-#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
-#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
-#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
-#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
-#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
-#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
-#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
-#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
-#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
-#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
-#define FRF_AZ_TX_DESCQ_EN_LBN 88
-#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
-#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
-#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
-#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
-#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
-#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
-#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
-#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
-#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
-#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
-#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
-#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
-#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
-#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
-#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
-#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
-#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
-#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
-#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
-#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
-#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
-#define FFE_AZ_TX_DESCQ_SIZE_4K 3
-#define FFE_AZ_TX_DESCQ_SIZE_2K 2
-#define FFE_AZ_TX_DESCQ_SIZE_1K 1
-#define FFE_AZ_TX_DESCQ_SIZE_512 0
-#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
-#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
-#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
-#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
-
-/* EVQ_PTR_TBL_KER: Event queue pointer table */
-#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
-#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
-#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
-/* EVQ_PTR_TBL: Event queue pointer table */
-#define FR_BZ_EVQ_PTR_TBL 0x00f60000
-#define FR_BZ_EVQ_PTR_TBL_STEP 16
-#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
-#define FR_BB_EVQ_PTR_TBL_ROWS 4096
-#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
-#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
-#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
-#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
-#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
-#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
-#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
-#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
-#define FRF_AZ_EVQ_EN_LBN 23
-#define FRF_AZ_EVQ_EN_WIDTH 1
-#define FRF_AZ_EVQ_SIZE_LBN 20
-#define FRF_AZ_EVQ_SIZE_WIDTH 3
-#define FFE_AZ_EVQ_SIZE_32K 6
-#define FFE_AZ_EVQ_SIZE_16K 5
-#define FFE_AZ_EVQ_SIZE_8K 4
-#define FFE_AZ_EVQ_SIZE_4K 3
-#define FFE_AZ_EVQ_SIZE_2K 2
-#define FFE_AZ_EVQ_SIZE_1K 1
-#define FFE_AZ_EVQ_SIZE_512 0
-#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
-#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
-
-/* BUF_HALF_TBL_KER: Buffer table in half-buffer-table mode (direct access by driver) */
-#define FR_AA_BUF_HALF_TBL_KER 0x00018000
-#define FR_AA_BUF_HALF_TBL_KER_STEP 8
-#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
-/* BUF_HALF_TBL: Buffer table in half-buffer-table mode (direct access by driver) */
-#define FR_BZ_BUF_HALF_TBL 0x00800000
-#define FR_BZ_BUF_HALF_TBL_STEP 8
-#define FR_CZ_BUF_HALF_TBL_ROWS 147456
-#define FR_BB_BUF_HALF_TBL_ROWS 524288
-#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
-#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
-#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
-#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
-#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
-#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
-#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
-#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
-
-/* BUF_FULL_TBL_KER: Buffer table in full-buffer-table mode (direct access by driver) */
-#define FR_AA_BUF_FULL_TBL_KER 0x00018000
-#define FR_AA_BUF_FULL_TBL_KER_STEP 8
-#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
-/* BUF_FULL_TBL: Buffer table in full-buffer-table mode (direct access by driver) */
-#define FR_BZ_BUF_FULL_TBL 0x00800000
-#define FR_BZ_BUF_FULL_TBL_STEP 8
-#define FR_CZ_BUF_FULL_TBL_ROWS 147456
-#define FR_BB_BUF_FULL_TBL_ROWS 917504
-#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
-#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
-#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
-#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
-#define FRF_AZ_BUF_ADR_REGION_LBN 48
-#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
-#define FFE_AZ_BUF_ADR_REGN3 3
-#define FFE_AZ_BUF_ADR_REGN2 2
-#define FFE_AZ_BUF_ADR_REGN1 1
-#define FFE_AZ_BUF_ADR_REGN0 0
-#define FRF_AZ_BUF_ADR_FBUF_LBN 14
-#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
-#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
-#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
-
-/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
-#define FR_BZ_RX_FILTER_TBL0 0x00f00000
-#define FR_BZ_RX_FILTER_TBL0_STEP 32
-#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
-/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
-#define FR_BB_RX_FILTER_TBL1 0x00f00010
-#define FR_BB_RX_FILTER_TBL1_STEP 32
-#define FR_BB_RX_FILTER_TBL1_ROWS 8192
-#define FRF_BZ_RSS_EN_LBN 110
-#define FRF_BZ_RSS_EN_WIDTH 1
-#define FRF_BZ_SCATTER_EN_LBN 109
-#define FRF_BZ_SCATTER_EN_WIDTH 1
-#define FRF_BZ_TCP_UDP_LBN 108
-#define FRF_BZ_TCP_UDP_WIDTH 1
-#define FRF_BZ_RXQ_ID_LBN 96
-#define FRF_BZ_RXQ_ID_WIDTH 12
-#define FRF_BZ_DEST_IP_LBN 64
-#define FRF_BZ_DEST_IP_WIDTH 32
-#define FRF_BZ_DEST_PORT_TCP_LBN 48
-#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
-#define FRF_BZ_SRC_IP_LBN 16
-#define FRF_BZ_SRC_IP_WIDTH 32
-#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
-#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
-
-/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
-#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
-#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
-#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
-#define FRF_CZ_RMFT_RSS_EN_LBN 75
-#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
-#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
-#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
-#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
-#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
-#define FRF_CZ_RMFT_RXQ_ID_LBN 61
-#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
-#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
-#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
-#define FRF_CZ_RMFT_DEST_MAC_LBN 12
-#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
-#define FRF_CZ_RMFT_VLAN_ID_LBN 0
-#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
-
-/* TIMER_TBL: Timer table */
-#define FR_BZ_TIMER_TBL 0x00f70000
-#define FR_BZ_TIMER_TBL_STEP 16
-#define FR_CZ_TIMER_TBL_ROWS 1024
-#define FR_BB_TIMER_TBL_ROWS 4096
-#define FRF_CZ_TIMER_Q_EN_LBN 33
-#define FRF_CZ_TIMER_Q_EN_WIDTH 1
-#define FRF_CZ_INT_ARMD_LBN 32
-#define FRF_CZ_INT_ARMD_WIDTH 1
-#define FRF_CZ_INT_PEND_LBN 31
-#define FRF_CZ_INT_PEND_WIDTH 1
-#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
-#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
-#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
-#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
-#define FRF_CZ_TIMER_MODE_LBN 14
-#define FRF_CZ_TIMER_MODE_WIDTH 2
-#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
-#define FFE_CZ_TIMER_MODE_TRIG_START 2
-#define FFE_CZ_TIMER_MODE_IMMED_START 1
-#define FFE_CZ_TIMER_MODE_DIS 0
-#define FRF_BB_TIMER_MODE_LBN 12
-#define FRF_BB_TIMER_MODE_WIDTH 2
-#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
-#define FFE_BB_TIMER_MODE_TRIG_START 2
-#define FFE_BB_TIMER_MODE_IMMED_START 1
-#define FFE_BB_TIMER_MODE_DIS 0
-#define FRF_CZ_TIMER_VAL_LBN 0
-#define FRF_CZ_TIMER_VAL_WIDTH 14
-#define FRF_BB_TIMER_VAL_LBN 0
-#define FRF_BB_TIMER_VAL_WIDTH 12
-
-/* TX_PACE_TBL: Transmit pacing table */
-#define FR_BZ_TX_PACE_TBL 0x00f80000
-#define FR_BZ_TX_PACE_TBL_STEP 16
-#define FR_CZ_TX_PACE_TBL_ROWS 1024
-#define FR_BB_TX_PACE_TBL_ROWS 4096
-#define FRF_BZ_TX_PACE_LBN 0
-#define FRF_BZ_TX_PACE_WIDTH 5
-
-/* RX_INDIRECTION_TBL: RX Indirection Table */
-#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
-#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
-#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
-#define FRF_BZ_IT_QUEUE_LBN 0
-#define FRF_BZ_IT_QUEUE_WIDTH 6
-
-/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
-#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
-#define FR_CZ_TX_FILTER_TBL0_STEP 16
-#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
-#define FRF_CZ_TIFT_TCP_UDP_LBN 108
-#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
-#define FRF_CZ_TIFT_TXQ_ID_LBN 96
-#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
-#define FRF_CZ_TIFT_DEST_IP_LBN 64
-#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
-#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
-#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
-#define FRF_CZ_TIFT_SRC_IP_LBN 16
-#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
-#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
-#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
-
-/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
-#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
-#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
-#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
-#define FRF_CZ_TMFT_TXQ_ID_LBN 61
-#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
-#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
-#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
-#define FRF_CZ_TMFT_SRC_MAC_LBN 12
-#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
-#define FRF_CZ_TMFT_VLAN_ID_LBN 0
-#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
-
-/* MC_TREG_SMEM: MC Shared Memory */
-#define FR_CZ_MC_TREG_SMEM 0x00ff0000
-#define FR_CZ_MC_TREG_SMEM_STEP 4
-#define FR_CZ_MC_TREG_SMEM_ROWS 512
-#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
-#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
-
-/* MSIX_VECTOR_TABLE: MSIX Vector Table */
-#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
-#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
-#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
-/* MSIX_VECTOR_TABLE: MSIX Vector Table */
-#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
-/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
-#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
-#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
-#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
-#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
-#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
-#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
-#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
-#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
-#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
-#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
-#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
-
-/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
-#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
-#define FR_BZ_MSIX_PBA_TABLE_STEP 4
-#define FR_BB_MSIX_PBA_TABLE_ROWS 2
-/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
-#define FR_CZ_MSIX_PBA_TABLE 0x00008000
-/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
-#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
-#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
-#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
-
-/* SRM_DBG_REG: SRAM debug access */
-#define FR_BZ_SRM_DBG 0x03000000
-#define FR_BZ_SRM_DBG_STEP 8
-#define FR_CZ_SRM_DBG_ROWS 262144
-#define FR_BB_SRM_DBG_ROWS 2097152
-#define FRF_BZ_SRM_DBG_LBN 0
-#define FRF_BZ_SRM_DBG_WIDTH 64
-
-/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
-#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
-#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
-#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
-#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
-#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
-
-/* DRIVER_EV */
-#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
-#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
-#define FSE_BZ_TX_DSC_ERROR_EV 15
-#define FSE_BZ_RX_DSC_ERROR_EV 14
-#define FSE_AA_RX_RECOVER_EV 11
-#define FSE_AZ_TIMER_EV 10
-#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
-#define FSE_AZ_WAKE_UP_EV 6
-#define FSE_AZ_SRM_UPD_DONE_EV 5
-#define FSE_AB_EVQ_NOT_EN_EV 3
-#define FSE_AZ_EVQ_INIT_DONE_EV 2
-#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
-#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
-#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
-#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
-
-/* EVENT_ENTRY */
-#define FSF_AZ_EV_CODE_LBN 60
-#define FSF_AZ_EV_CODE_WIDTH 4
-#define FSE_CZ_EV_CODE_MCDI_EV 12
-#define FSE_CZ_EV_CODE_USER_EV 8
-#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
-#define FSE_AZ_EV_CODE_GLOBAL_EV 6
-#define FSE_AZ_EV_CODE_DRIVER_EV 5
-#define FSE_AZ_EV_CODE_TX_EV 2
-#define FSE_AZ_EV_CODE_RX_EV 0
-#define FSF_AZ_EV_DATA_LBN 0
-#define FSF_AZ_EV_DATA_WIDTH 60
-
-/* GLOBAL_EV */
-#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
-#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
-#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
-#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
-#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
-#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
-#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
-#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
-#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
-#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
-#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
-#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
-
-/* LEGACY_INT_VEC */
-#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
-#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
-#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
-#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
-#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
-#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
-#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
-#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
-#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
-#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
-
-/* MC_XGMAC_FLTR_RULE_DEF */
-#define FSF_CZ_MC_XFRC_MODE_LBN 416
-#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
-#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
-#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
-#define FSF_CZ_MC_XFRC_HASH_LBN 384
-#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
-#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
-#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
-#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
-#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
-#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
-#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
-
-/* RX_EV */
-#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
-#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
-#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
-#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
-#define FSF_AZ_RX_EV_PKT_OK_LBN 56
-#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
-#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
-#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
-#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
-#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
-#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
-#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
-#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
-#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
-#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
-#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
-#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
-#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
-#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
-#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
-#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
-#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
-#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
-#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
-#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
-#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
-#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
-#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
-#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
-#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
-#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
-#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
-#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
-#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
-#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
-#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
-#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
-#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
-#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
-#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
-#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
-#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
-#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
-#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
-#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
-#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
-#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
-#define FSF_AZ_RX_EV_PORT_LBN 30
-#define FSF_AZ_RX_EV_PORT_WIDTH 1
-#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
-#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
-#define FSF_AZ_RX_EV_SOP_LBN 15
-#define FSF_AZ_RX_EV_SOP_WIDTH 1
-#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
-#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
-#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
-#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
-#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
-#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
-#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
-
-/* RX_KER_DESC */
-#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
-#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
-#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
-#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
-#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
-#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
-
-/* RX_USER_DESC */
-#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
-#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
-#define FSF_AZ_RX_USER_BUF_ID_LBN 0
-#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
-
-/* TX_EV */
-#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
-#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
-#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
-#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
-#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
-#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
-#define FSF_AZ_TX_EV_PORT_LBN 16
-#define FSF_AZ_TX_EV_PORT_WIDTH 1
-#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
-#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
-#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
-#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
-#define FSF_AZ_TX_EV_COMP_LBN 12
-#define FSF_AZ_TX_EV_COMP_WIDTH 1
-#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
-#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
-
-/* TX_KER_DESC */
-#define FSF_AZ_TX_KER_CONT_LBN 62
-#define FSF_AZ_TX_KER_CONT_WIDTH 1
-#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
-#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
-#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
-#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
-#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
-#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
-
-/* TX_USER_DESC */
-#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
-#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
-#define FSF_AZ_TX_USER_CONT_LBN 46
-#define FSF_AZ_TX_USER_CONT_WIDTH 1
-#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
-#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
-#define FSF_AZ_TX_USER_BUF_ID_LBN 13
-#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
-#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
-#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
-
-/* USER_EV */
-#define FSF_CZ_USER_QID_LBN 32
-#define FSF_CZ_USER_QID_WIDTH 10
-#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
-#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
-
-/**************************************************************************
- *
- * Falcon B0 PCIe core indirect registers
- *
- **************************************************************************
- */
-
-#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
-
-#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
-
-#define FPCR_BB_ACK_RPL_TIMER 0x700
-#define FPCRF_BB_ACK_TL_LBN 0
-#define FPCRF_BB_ACK_TL_WIDTH 16
-#define FPCRF_BB_RPL_TL_LBN 16
-#define FPCRF_BB_RPL_TL_WIDTH 16
-
-#define FPCR_BB_ACK_FREQ 0x70C
-#define FPCRF_BB_ACK_FREQ_LBN 0
-#define FPCRF_BB_ACK_FREQ_WIDTH 7
-
-/**************************************************************************
- *
- * Pseudo-registers and fields
- *
- **************************************************************************
- */
-
-/* Interrupt acknowledge work-around register (A0/A1 only) */
-#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
-
-/* EE_SPI_HCMD_REG: SPI host command register */
-/* Values for the EE_SPI_HCMD_SF_SEL register field */
-#define FFE_AB_SPI_DEVICE_EEPROM 0
-#define FFE_AB_SPI_DEVICE_FLASH 1
-
-/* NIC_STAT_REG: NIC status register */
-#define FRF_AB_STRAP_10G_LBN 2
-#define FRF_AB_STRAP_10G_WIDTH 1
-#define FRF_AA_STRAP_PCIE_LBN 0
-#define FRF_AA_STRAP_PCIE_WIDTH 1
-
-/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
-#define FRF_AZ_FATAL_INTR_LBN 0
-#define FRF_AZ_FATAL_INTR_WIDTH 12
-
-/* SRM_CFG_REG: SRAM configuration register */
-/* We treat the number of SRAM banks and bank size as a single field */
-#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
-#define FRF_AZ_SRM_NB_SZ_WIDTH \
- (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
-#define FFE_AB_SRM_NB1_SZ2M 0
-#define FFE_AB_SRM_NB1_SZ4M 1
-#define FFE_AB_SRM_NB1_SZ8M 2
-#define FFE_AB_SRM_NB_SZ_DEF 3
-#define FFE_AB_SRM_NB2_SZ4M 4
-#define FFE_AB_SRM_NB2_SZ8M 5
-#define FFE_AB_SRM_NB2_SZ16M 6
-#define FFE_AB_SRM_NB_SZ_RES 7
-
-/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
-/* We write just the last dword of these registers */
-#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
- (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
- FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
-#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
-#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
-
-/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
-#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
- (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
- FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
-#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
-#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
-
-/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
-#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
-#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
-
-/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
-#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
-#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
-
-/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
-#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
-#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
- FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
-
-/* XM_RX_PARAM_REG: XGMAC receive parameter register */
-#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
-#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
- FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
-
-/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
-/* Default values */
-#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
-#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
-#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
-
-/* XX_CORE_STAT_REG: XAUI XGXS core status register */
-/* XGXS all-lanes status fields */
-#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
-#define FRF_AB_XX_SYNC_STAT_WIDTH 4
-#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
-#define FRF_AB_XX_COMMA_DET_WIDTH 4
-#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
-#define FRF_AB_XX_CHAR_ERR_WIDTH 4
-#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
-#define FRF_AB_XX_DISPERR_WIDTH 4
-#define FFE_AB_XX_STAT_ALL_LANES 0xf
-#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
-#define FRF_AB_XX_FORCE_SIG_WIDTH 8
-#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
-
-/* RX_MAC_FILTER_TBL0 */
-/* RMFT_DEST_MAC is wider than 32 bits */
-#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
-#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
-#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
-#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
-
-/* TX_MAC_FILTER_TBL0 */
-/* TMFT_SRC_MAC is wider than 32 bits */
-#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
-#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
-#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
-#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
-
-/* TX_PACE_TBL */
-/* Values >20 are documented as reserved, but will result in a queue going
- * into the fast bin with a pace value of zero. */
-#define FFE_BZ_TX_PACE_OFF 0
-#define FFE_BZ_TX_PACE_RESERVED 21
-
-/* DRIVER_EV */
-/* Sub-fields of an RX flush completion event */
-#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
-#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
-#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
-#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
-
-/* EVENT_ENTRY */
-/* Magic number field for event test */
-#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
-#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
-
-/* RX packet prefix */
-#define FS_BZ_RX_PREFIX_HASH_OFST 12
-#define FS_BZ_RX_PREFIX_SIZE 16
-
-#endif /* EFX_FARCH_REGS_H */
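
The whole of the deleted farch_regs.h consists of LBN ("lowest bit number") / WIDTH pairs like those above, describing where a field sits inside a register, descriptor or event. As a rough standalone illustration of how such pairs are consumed (the driver itself uses its EFX_OWORD/EFX_QWORD field macros, so the helpers below are illustrative only, not driver code):

/* Standalone model of how LBN/WIDTH pairs describe bitfields. */
#include <stdint.h>
#include <stdio.h>

#define FSF_AZ_TX_KER_BYTE_COUNT_LBN   48
#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14

static uint64_t field_mask(unsigned int width)
{
	return width >= 64 ? ~0ULL : (1ULL << width) - 1;
}

static uint64_t insert_field(uint64_t word, unsigned int lbn,
			     unsigned int width, uint64_t value)
{
	uint64_t mask = field_mask(width) << lbn;

	return (word & ~mask) | ((value << lbn) & mask);
}

static uint64_t extract_field(uint64_t word, unsigned int lbn,
			      unsigned int width)
{
	return (word >> lbn) & field_mask(width);
}

int main(void)
{
	uint64_t desc = 0;

	desc = insert_field(desc, FSF_AZ_TX_KER_BYTE_COUNT_LBN,
			    FSF_AZ_TX_KER_BYTE_COUNT_WIDTH, 1514);
	printf("byte count = %llu\n",
	       (unsigned long long)extract_field(desc,
						 FSF_AZ_TX_KER_BYTE_COUNT_LBN,
						 FSF_AZ_TX_KER_BYTE_COUNT_WIDTH));
	return 0;
}
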
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 5f201a547e5b..0d45900afa76 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -30,13 +30,6 @@
*
* Only some combinations are supported, depending on NIC type:
*
- * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
- * local 2-tuple (only implemented for Falcon B0)
- *
- * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
- * or local 2-tuple, or local MAC with or without outer VID, and RX
- * default filters
- *
* - Huntington supports filter matching controlled by firmware, potentially
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
* with or without outer and inner VID
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 30439cc83a89..7432c09010d6 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -17,46 +17,22 @@
*
**************************************************************************
*
- * Notes on locking strategy for the Falcon architecture:
- *
- * Many CSRs are very wide and cannot be read or written atomically.
- * Writes from the host are buffered by the Bus Interface Unit (BIU)
- * up to 128 bits. Whenever the host writes part of such a register,
- * the BIU collects the written value and does not write to the
- * underlying register until all 4 dwords have been written. A
- * similar buffering scheme applies to host access to the NIC's 64-bit
- * SRAM.
- *
- * Writes to different CSRs and 64-bit SRAM words must be serialised,
- * since interleaved access can result in lost writes. We use
- * efx_nic::biu_lock for this.
- *
- * We also serialise reads from 128-bit CSRs and SRAM with the same
- * spinlock. This may not be necessary, but it doesn't really matter
- * as there are no such reads on the fast path.
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD.
*
- * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
- * 128-bit but are special-cased in the BIU to avoid the need for
- * locking in the host:
+ * The TX_DESC_UPD DMA descriptor pointer is 128-bits but is a special
+ * case in the BIU to avoid the need for locking in the host:
*
- * - They are write-only.
- * - The semantics of writing to these registers are such that
+ * - It is write-only.
+ * - The semantics of writing to this register is such that
* replacing the low 96 bits with zero does not affect functionality.
- * - If the host writes to the last dword address of such a register
+ * - If the host writes to the last dword address of the register
* (i.e. the high 32 bits) the underlying register will always be
* written. If the collector and the current write together do not
* provide values for all 128 bits of the register, the low 96 bits
* will be written as zero.
- * - If the host writes to the address of any other part of such a
- * register while the collector already holds values for some other
- * register, the write is discarded and the collector maintains its
- * current state.
- *
- * The EF10 architecture exposes very few registers to the host and
- * most of them are only 32 bits wide. The only exceptions are the MC
- * doorbell register pair, which has its own latching, and
- * TX_DESC_UPD, which works in a similar way to the Falcon
- * architecture.
*/
#if BITS_PER_LONG == 64
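
The rewritten comment above describes the TX_DESC_UPD semantics: writing only the final dword commits the whole 128-bit register with the low 96 bits forced to zero, which is what lets the fast path skip the BIU lock. A small standalone model of that behaviour (layout and names are invented, not the device register map):

/* Standalone model (not driver code) of the last-dword write semantics. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reg128 {
	uint32_t dword[4];		/* dword[3] carries the write pointer */
};

/* Full write: all four dwords supplied (needs serialising against others) */
static void write_full(struct reg128 *reg, const uint32_t val[4])
{
	memcpy(reg->dword, val, sizeof(reg->dword));
}

/* Fast-path write: only the last dword; the other 96 bits become zero,
 * so no lock is needed on the host side.
 */
static void write_last_dword(struct reg128 *reg, uint32_t wptr)
{
	memset(reg->dword, 0, 3 * sizeof(uint32_t));
	reg->dword[3] = wptr;
}

int main(void)
{
	uint32_t full[4] = { 1, 2, 3, 4 };
	struct reg128 reg;

	write_full(&reg, full);
	write_last_dword(&reg, 0x123);
	printf("wptr=%#x low dwords=%u %u %u\n",
	       reg.dword[3], reg.dword[0], reg.dword[1], reg.dword[2]);
	return 0;
}
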
@@ -125,27 +101,6 @@ static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
-/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
-static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
- const efx_qword_t *value, unsigned int index)
-{
- unsigned int addr = index * sizeof(*value);
- unsigned long flags __attribute__ ((unused));
-
- netif_vdbg(efx, hw, efx->net_dev,
- "writing SRAM address %x with " EFX_QWORD_FMT "\n",
- addr, EFX_QWORD_VAL(*value));
-
- spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef EFX_USE_QWORD_IO
- __raw_writeq((__force u64)value->u64[0], membase + addr);
-#else
- __raw_writel((__force u32)value->u32[0], membase + addr);
- __raw_writel((__force u32)value->u32[1], membase + addr + 4);
-#endif
- spin_unlock_irqrestore(&efx->biu_lock, flags);
-}
-
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg)
@@ -176,27 +131,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
EFX_OWORD_VAL(*value));
}
-/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
-static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
- efx_qword_t *value, unsigned int index)
-{
- unsigned int addr = index * sizeof(*value);
- unsigned long flags __attribute__ ((unused));
-
- spin_lock_irqsave(&efx->biu_lock, flags);
-#ifdef EFX_USE_QWORD_IO
- value->u64[0] = (__force __le64)__raw_readq(membase + addr);
-#else
- value->u32[0] = (__force __le32)__raw_readl(membase + addr);
- value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
-#endif
- spin_unlock_irqrestore(&efx->biu_lock, flags);
-
- netif_vdbg(efx, hw, efx->net_dev,
- "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
- addr, EFX_QWORD_VAL(*value));
-}
-
/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 0cab508f2f9d..c3e2b4a21d10 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -16,6 +16,7 @@
#include "mcdi_pcol.h"
#include "mcdi_pcol_mae.h"
#include "tc_encap_actions.h"
+#include "tc_conntrack.h"
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
{
@@ -227,6 +228,256 @@ void efx_mae_counters_grant_credits(struct work_struct *work)
rx_queue->granted_count += credits;
}
+static int efx_mae_table_get_desc(struct efx_nic *efx,
+ struct efx_tc_table_desc *desc,
+ u32 table_id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(16));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_DESCRIPTOR_IN_LEN);
+ unsigned int offset = 0, i;
+ size_t outlen;
+ int rc;
+
+ memset(desc, 0, sizeof(*desc));
+
+ MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_TABLE_ID, table_id);
+more:
+ MCDI_SET_DWORD(inbuf, TABLE_DESCRIPTOR_IN_FIRST_FIELDS_INDEX, offset);
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DESCRIPTOR, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(1)) {
+ rc = -EIO;
+ goto fail;
+ }
+ if (!offset) { /* first iteration: get metadata */
+ desc->type = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_TYPE);
+ desc->key_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_KEY_WIDTH);
+ desc->resp_width = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_RESP_WIDTH);
+ desc->n_keys = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_KEY_FIELDS);
+ desc->n_resps = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_RESP_FIELDS);
+ desc->n_prios = MCDI_WORD(outbuf, TABLE_DESCRIPTOR_OUT_N_PRIORITIES);
+ desc->flags = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_FLAGS);
+ rc = -EOPNOTSUPP;
+ if (desc->flags)
+ goto fail;
+ desc->scheme = MCDI_BYTE(outbuf, TABLE_DESCRIPTOR_OUT_SCHEME);
+ if (desc->scheme)
+ goto fail;
+ rc = -ENOMEM;
+ desc->keys = kcalloc(desc->n_keys,
+ sizeof(struct efx_tc_table_field_fmt),
+ GFP_KERNEL);
+ if (!desc->keys)
+ goto fail;
+ desc->resps = kcalloc(desc->n_resps,
+ sizeof(struct efx_tc_table_field_fmt),
+ GFP_KERNEL);
+ if (!desc->resps)
+ goto fail;
+ }
+ /* FW could have returned more than the 16 field_descrs we
+ * made room for in our outbuf
+ */
+ outlen = min(outlen, sizeof(outbuf));
+ for (i = 0; i + offset < desc->n_keys + desc->n_resps; i++) {
+ struct efx_tc_table_field_fmt *field;
+ MCDI_DECLARE_STRUCT_PTR(fdesc);
+
+ if (outlen < MC_CMD_TABLE_DESCRIPTOR_OUT_LEN(i + 1)) {
+ offset += i;
+ goto more;
+ }
+ if (i + offset < desc->n_keys)
+ field = desc->keys + i + offset;
+ else
+ field = desc->resps + (i + offset - desc->n_keys);
+ fdesc = MCDI_ARRAY_STRUCT_PTR(outbuf,
+ TABLE_DESCRIPTOR_OUT_FIELDS, i);
+ field->field_id = MCDI_STRUCT_WORD(fdesc,
+ TABLE_FIELD_DESCR_FIELD_ID);
+ field->lbn = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_LBN);
+ field->width = MCDI_STRUCT_WORD(fdesc, TABLE_FIELD_DESCR_WIDTH);
+ field->masking = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_MASK_TYPE);
+ field->scheme = MCDI_STRUCT_BYTE(fdesc, TABLE_FIELD_DESCR_SCHEME);
+ }
+ return 0;
+
+fail:
+ kfree(desc->keys);
+ kfree(desc->resps);
+ return rc;
+}
+
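
efx_mae_table_get_desc() above reads the table descriptor in chunks: the first MCDI response also carries the table metadata, and later calls resume at FIRST_FIELDS_INDEX until every key and response field has been copied. The same resumable-paging pattern, reduced to a standalone sketch with a fake firmware callback (all names and sizes hypothetical):

/* Standalone sketch of resumable paging: request fields from an offset,
 * copy however many came back, repeat.  fake_fw_fetch() stands in for the
 * MC_CMD_TABLE_DESCRIPTOR response and is purely hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

#define TOTAL_FIELDS 37
#define PER_CALL     16

/* Pretend firmware: returns up to @max field ids starting at @offset */
static size_t fake_fw_fetch(size_t offset, unsigned int *out, size_t max)
{
	size_t n = 0;

	while (n < max && offset + n < TOTAL_FIELDS) {
		out[n] = (unsigned int)(offset + n);
		n++;
	}
	return n;
}

int main(void)
{
	unsigned int fields[TOTAL_FIELDS];
	unsigned int chunk[PER_CALL];
	size_t offset = 0;

	while (offset < TOTAL_FIELDS) {
		size_t got = fake_fw_fetch(offset, chunk, PER_CALL);
		size_t i;

		if (!got)
			break;			/* short response: give up */
		for (i = 0; i < got; i++)
			fields[offset + i] = chunk[i];
		offset += got;
	}
	printf("fetched %zu of %u fields\n", offset, TOTAL_FIELDS);
	return 0;
}
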
+static int efx_mae_table_hook_find(u16 n_fields,
+ struct efx_tc_table_field_fmt *fields,
+ u16 field_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_fields; i++) {
+ if (fields[i].field_id == field_id)
+ return i;
+ }
+ return -EPROTO;
+}
+
+#define TABLE_FIND_KEY(_desc, _id) \
+ efx_mae_table_hook_find((_desc)->n_keys, (_desc)->keys, _id)
+#define TABLE_FIND_RESP(_desc, _id) \
+ efx_mae_table_hook_find((_desc)->n_resps, (_desc)->resps, _id)
+
+#define TABLE_HOOK_KEY(_meta, _name, _mcdi_name) ({ \
+ int _rc = TABLE_FIND_KEY(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \
+ \
+ if (_rc > U8_MAX) \
+ _rc = -EOPNOTSUPP; \
+ if (_rc >= 0) { \
+ _meta->keys._name##_idx = _rc; \
+ _rc = 0; \
+ } \
+ _rc; \
+})
+#define TABLE_HOOK_RESP(_meta, _name, _mcdi_name) ({ \
+ int _rc = TABLE_FIND_RESP(&_meta->desc, TABLE_FIELD_ID_##_mcdi_name); \
+ \
+ if (_rc > U8_MAX) \
+ _rc = -EOPNOTSUPP; \
+ if (_rc >= 0) { \
+ _meta->resps._name##_idx = _rc; \
+ _rc = 0; \
+ } \
+ _rc; \
+})
+
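
TABLE_HOOK_KEY/TABLE_HOOK_RESP above look up a well-known field ID in the descriptor and cache its position in a u8 index, returning -EPROTO when the field is missing and -EOPNOTSUPP when the index will not fit. A standalone sketch of that lookup-and-cache step (struct contents and field values are invented):

/* Standalone sketch of "find field id, cache its index in a u8". */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct field_fmt {
	uint16_t field_id;
	uint16_t lbn;
	uint16_t width;
};

static int find_field(const struct field_fmt *fields, unsigned int n,
		      uint16_t wanted)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (fields[i].field_id == wanted)
			return (int)i;
	return -EPROTO;			/* descriptor lacks this field */
}

static int hook_field(const struct field_fmt *fields, unsigned int n,
		      uint16_t wanted, uint8_t *idx_out)
{
	int rc = find_field(fields, n, wanted);

	if (rc < 0)
		return rc;
	if (rc > UINT8_MAX)
		return -EOPNOTSUPP;	/* index must fit in a u8 */
	*idx_out = (uint8_t)rc;
	return 0;
}

int main(void)
{
	struct field_fmt keys[] = { { 10, 0, 16 }, { 24, 16, 8 } };
	uint8_t ip_proto_idx;

	if (!hook_field(keys, 2, 24, &ip_proto_idx))
		printf("ip_proto is key field #%u\n", ip_proto_idx);
	return 0;
}
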
+static int efx_mae_table_hook_ct(struct efx_nic *efx,
+ struct efx_tc_table_ct *meta_ct)
+{
+ int rc;
+
+ rc = TABLE_HOOK_KEY(meta_ct, eth_proto, ETHER_TYPE);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, ip_proto, IP_PROTO);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, src_ip, SRC_IP);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, dst_ip, DST_IP);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, l4_sport, SRC_PORT);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, l4_dport, DST_PORT);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_KEY(meta_ct, zone, DOMAIN);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, dnat, NAT_DIR);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, nat_ip, NAT_IP);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, l4_natport, NAT_PORT);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, mark, CT_MARK);
+ if (rc)
+ return rc;
+ rc = TABLE_HOOK_RESP(meta_ct, counter_id, COUNTER_ID);
+ if (rc)
+ return rc;
+ meta_ct->hooked = true;
+ return 0;
+}
+
+static void efx_mae_table_free_desc(struct efx_tc_table_desc *desc)
+{
+ kfree(desc->keys);
+ kfree(desc->resps);
+ memset(desc, 0, sizeof(*desc));
+}
+
+static bool efx_mae_check_table_exists(struct efx_nic *efx, u32 tbl_req)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_TABLE_LIST_OUT_LEN(16));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TABLE_LIST_IN_LEN);
+ u32 tbl_id, tbl_total, tbl_cnt, pos = 0;
+ size_t outlen, msg_max;
+ bool ct_tbl = false;
+ int rc, idx;
+
+ msg_max = sizeof(outbuf);
+ efx->tc->meta_ct.hooked = false;
+more:
+	memset(outbuf, 0, sizeof(outbuf));

+ MCDI_SET_DWORD(inbuf, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, pos);
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_LIST, inbuf, sizeof(inbuf), outbuf,
+ msg_max, &outlen);
+ if (rc)
+ return false;
+
+ if (outlen < MC_CMD_TABLE_LIST_OUT_LEN(1))
+ return false;
+
+ tbl_total = MCDI_DWORD(outbuf, TABLE_LIST_OUT_N_TABLES);
+ tbl_cnt = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(min(outlen, msg_max));
+
+ for (idx = 0; idx < tbl_cnt; idx++) {
+ tbl_id = MCDI_ARRAY_DWORD(outbuf, TABLE_LIST_OUT_TABLE_ID, idx);
+ if (tbl_id == tbl_req) {
+ ct_tbl = true;
+ break;
+ }
+ }
+
+ pos += tbl_cnt;
+ if (!ct_tbl && pos < tbl_total)
+ goto more;
+
+ return ct_tbl;
+}
+
+int efx_mae_get_tables(struct efx_nic *efx)
+{
+ int rc;
+
+ efx->tc->meta_ct.hooked = false;
+ if (efx_mae_check_table_exists(efx, TABLE_ID_CONNTRACK_TABLE)) {
+ rc = efx_mae_table_get_desc(efx, &efx->tc->meta_ct.desc,
+ TABLE_ID_CONNTRACK_TABLE);
+ if (rc) {
+ pci_info(efx->pci_dev,
+ "FW does not support conntrack desc rc %d\n",
+ rc);
+ return 0;
+ }
+
+ rc = efx_mae_table_hook_ct(efx, &efx->tc->meta_ct);
+ if (rc) {
+ pci_info(efx->pci_dev,
+ "FW does not support conntrack hook rc %d\n",
+ rc);
+ return 0;
+ }
+ } else {
+ pci_info(efx->pci_dev,
+ "FW does not support conntrack table\n");
+ }
+ return 0;
+}
+
+void efx_mae_free_tables(struct efx_nic *efx)
+{
+ efx_mae_table_free_desc(&efx->tc->meta_ct.desc);
+ efx->tc->meta_ct.hooked = false;
+}
+
static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
@@ -444,8 +695,13 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
CHECK(L4_SPORT, l4_sport) ||
CHECK(L4_DPORT, l4_dport) ||
CHECK(TCP_FLAGS, tcp_flags) ||
+ CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst) ||
CHECK_BIT(IS_IP_FRAG, ip_frag) ||
CHECK_BIT(IP_FIRST_FRAG, ip_firstfrag) ||
+ CHECK_BIT(DO_CT, ct_state_trk) ||
+ CHECK_BIT(CT_HIT, ct_state_est) ||
+ CHECK(CT_MARK, ct_mark) ||
+ CHECK(CT_DOMAIN, ct_zone) ||
CHECK(RECIRC_ID, recirc_id))
return rc;
/* Matches on outer fields are done in a separate hardware table,
@@ -471,6 +727,90 @@ int efx_mae_match_check_caps(struct efx_nic *efx,
}
return 0;
}
+
+/* Checks for match fields not supported in LHS Outer Rules */
+#define UNSUPPORTED(_field) ({ \
+ enum mask_type typ = classify_mask((const u8 *)&mask->_field, \
+ sizeof(mask->_field)); \
+ \
+ if (typ != MASK_ZEROES) { \
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\
+ rc = -EOPNOTSUPP; \
+ } \
+ rc; \
+})
+#define UNSUPPORTED_BIT(_field) ({ \
+ if (mask->_field) { \
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match field " #_field);\
+ rc = -EOPNOTSUPP; \
+ } \
+ rc; \
+})
+
+/* LHS rules are (normally) inserted in the Outer Rule table, which means
+ * they use ENC_ fields in hardware to match regular (not enc_) fields from
+ * &struct efx_tc_match_fields.
+ */
+int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *supported_fields = efx->tc->caps->outer_rule_fields;
+ __be32 ingress_port = cpu_to_be32(mask->ingress_port);
+ enum mask_type ingress_port_mask_type;
+ int rc;
+
+ /* Check for _PREFIX assumes big-endian, so we need to convert */
+ ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
+ sizeof(ingress_port));
+ rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
+ ingress_port_mask_type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+ mask_type_name(ingress_port_mask_type),
+ "ingress_port");
+ return rc;
+ }
+ if (CHECK(ENC_ETHER_TYPE, eth_proto) ||
+ CHECK(ENC_VLAN0_TCI, vlan_tci[0]) ||
+ CHECK(ENC_VLAN0_PROTO, vlan_proto[0]) ||
+ CHECK(ENC_VLAN1_TCI, vlan_tci[1]) ||
+ CHECK(ENC_VLAN1_PROTO, vlan_proto[1]) ||
+ CHECK(ENC_ETH_SADDR, eth_saddr) ||
+ CHECK(ENC_ETH_DADDR, eth_daddr) ||
+ CHECK(ENC_IP_PROTO, ip_proto) ||
+ CHECK(ENC_IP_TOS, ip_tos) ||
+ CHECK(ENC_IP_TTL, ip_ttl) ||
+ CHECK_BIT(ENC_IP_FRAG, ip_frag) ||
+ UNSUPPORTED_BIT(ip_firstfrag) ||
+ CHECK(ENC_SRC_IP4, src_ip) ||
+ CHECK(ENC_DST_IP4, dst_ip) ||
+#ifdef CONFIG_IPV6
+ CHECK(ENC_SRC_IP6, src_ip6) ||
+ CHECK(ENC_DST_IP6, dst_ip6) ||
+#endif
+ CHECK(ENC_L4_SPORT, l4_sport) ||
+ CHECK(ENC_L4_DPORT, l4_dport) ||
+ UNSUPPORTED(tcp_flags) ||
+ CHECK_BIT(TCP_SYN_FIN_RST, tcp_syn_fin_rst))
+ return rc;
+ if (efx_tc_match_is_encap(mask)) {
+ /* can't happen; disallowed for local rules, translated
+ * for foreign rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected encap match in LHS rule");
+ return -EOPNOTSUPP;
+ }
+ if (UNSUPPORTED(enc_keyid) ||
+ /* Can't filter on conntrack in LHS rules */
+ UNSUPPORTED_BIT(ct_state_trk) ||
+ UNSUPPORTED_BIT(ct_state_est) ||
+ UNSUPPORTED(ct_mark) ||
+ UNSUPPORTED(recirc_id))
+ return rc;
+ return 0;
+}
+#undef UNSUPPORTED
#undef CHECK_BIT
#undef CHECK
@@ -879,6 +1219,71 @@ fail:
return rc;
}
+/**
+ * efx_mae_allocate_pedit_mac() - allocate pedit MAC address in HW.
+ * @efx: NIC we're installing a pedit MAC address on
+ * @ped: pedit MAC action to be installed
+ *
+ * Attempts to install @ped in HW and, on success, populates its id with the
+ * index of this entry in the firmware MAC address table.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int efx_mae_allocate_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN !=
+ sizeof(ped->h_addr));
+ memcpy(MCDI_PTR(inbuf, MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR), ped->h_addr,
+ sizeof(ped->h_addr));
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ ped->fw_id = MCDI_DWORD(outbuf, MAE_MAC_ADDR_ALLOC_OUT_MAC_ID);
+ return 0;
+}
+
+/**
+ * efx_mae_free_pedit_mac() - free pedit MAC address in HW.
+ * @efx: NIC we're installing a pedit MAC address on
+ * @ped: pedit MAC action that needs to be freed
+ *
+ * Frees @ped in HW, checks that the firmware did not free a different one, and
+ * clears the id (which denotes the index of the entry in the MAC address table).
+ */
+void efx_mae_free_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_MAC_ADDR_FREE_IN_MAC_ID, ped->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_FREE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc || outlen < sizeof(outbuf))
+ return;
+	/* FW freed a different ID than we asked for; this should never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what MAC addresses exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID) != ped->fw_id))
+ return;
+ /* We're probably about to free @ped, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ ped->fw_id = MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL;
+}
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
@@ -886,15 +1291,27 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
size_t outlen;
int rc;
- MCDI_POPULATE_DWORD_3(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+ MCDI_POPULATE_DWORD_4(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push,
MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop,
- MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap);
+ MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap,
+ MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL,
+ act->do_ttl_dec);
+
+ if (act->src_mac)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ act->src_mac->fw_id);
+ else
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+
+ if (act->dst_mac)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ act->dst_mac->fw_id);
+ else
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
- MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
- MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
- MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
- MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
if (act->count && !WARN_ON(!act->count->cnt))
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
act->count->cnt->fw_id);
@@ -1153,6 +1570,465 @@ int efx_mae_unregister_encap_match(struct efx_nic *efx,
return 0;
}
+static int efx_mae_populate_lhs_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+ const struct efx_tc_match *match)
+{
+ if (match->mask.ingress_port) {
+ if (~match->mask.ingress_port)
+ return -EOPNOTSUPP;
+ MCDI_STRUCT_SET_DWORD(match_crit,
+ MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR,
+ match->value.ingress_port);
+ }
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_ENC_FIELD_PAIRS_INGRESS_MPORT_SELECTOR_MASK,
+ match->mask.ingress_port);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE,
+ match->value.eth_proto);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETHER_TYPE_BE_MASK,
+ match->mask.eth_proto);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE,
+ match->value.vlan_tci[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_TCI_BE_MASK,
+ match->mask.vlan_tci[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE,
+ match->value.vlan_proto[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN0_PROTO_BE_MASK,
+ match->mask.vlan_proto[0]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE,
+ match->value.vlan_tci[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_TCI_BE_MASK,
+ match->mask.vlan_tci[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE,
+ match->value.vlan_proto[1]);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_VLAN1_PROTO_BE_MASK,
+ match->mask.vlan_proto[1]);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE),
+ match->value.eth_saddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_SADDR_BE_MASK),
+ match->mask.eth_saddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE),
+ match->value.eth_daddr, ETH_ALEN);
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_ETH_DADDR_BE_MASK),
+ match->mask.eth_daddr, ETH_ALEN);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO,
+ match->value.ip_proto);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_PROTO_MASK,
+ match->mask.ip_proto);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS,
+ match->value.ip_tos);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TOS_MASK,
+ match->mask.ip_tos);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL,
+ match->value.ip_ttl);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_IP_TTL_MASK,
+ match->mask.ip_ttl);
+ MCDI_STRUCT_POPULATE_BYTE_1(match_crit,
+ MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS,
+ MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG,
+ match->value.ip_frag);
+ MCDI_STRUCT_POPULATE_BYTE_1(match_crit,
+ MAE_ENC_FIELD_PAIRS_ENC_VLAN_FLAGS_MASK,
+ MAE_ENC_FIELD_PAIRS_ENC_IP_FRAG_MASK,
+ match->mask.ip_frag);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE,
+ match->value.src_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP4_BE_MASK,
+ match->mask.src_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE,
+ match->value.dst_ip);
+ MCDI_STRUCT_SET_DWORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP4_BE_MASK,
+ match->mask.dst_ip);
+#ifdef CONFIG_IPV6
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE),
+ &match->value.src_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_SRC_IP6_BE_MASK),
+ &match->mask.src_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE),
+ &match->value.dst_ip6, sizeof(struct in6_addr));
+ memcpy(MCDI_STRUCT_PTR(match_crit, MAE_ENC_FIELD_PAIRS_ENC_DST_IP6_BE_MASK),
+ &match->mask.dst_ip6, sizeof(struct in6_addr));
+#endif
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE,
+ match->value.l4_sport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_SPORT_BE_MASK,
+ match->mask.l4_sport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE,
+ match->value.l4_dport);
+ MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_ENC_FIELD_PAIRS_ENC_L4_DPORT_BE_MASK,
+ match->mask.l4_dport);
+ /* No enc-keys in LHS rules. Caps check should have caught this; any
+ * enc-keys from an fLHS should have been translated to regular keys
+ * and any EM should be a pseudo (we're an OR so can't have a direct
+ * EM with another OR).
+ */
+ if (WARN_ON_ONCE(match->encap && !match->encap->type))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_src_ip))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_dst_ip))
+ return -EOPNOTSUPP;
+#ifdef CONFIG_IPV6
+ if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_src_ip6)))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(!ipv6_addr_any(&match->mask.enc_dst_ip6)))
+ return -EOPNOTSUPP;
+#endif
+ if (WARN_ON_ONCE(match->mask.enc_ip_tos))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_ip_ttl))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_sport))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_dport))
+ return -EOPNOTSUPP;
+ if (WARN_ON_ONCE(match->mask.enc_keyid))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx,
+ struct efx_tc_lhs_rule *rule, u32 prio)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_INSERT_IN_LEN(MAE_ENC_FIELD_PAIRS_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_INSERT_OUT_LEN);
+ MCDI_DECLARE_STRUCT_PTR(match_crit);
+ const struct efx_tc_lhs_action *act;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_PRIO, prio);
+ /* match */
+ match_crit = _MCDI_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_FIELD_MATCH_CRITERIA);
+ rc = efx_mae_populate_lhs_match_criteria(match_crit, &rule->match);
+ if (rc)
+ return rc;
+
+ /* action */
+ act = &rule->lhs_act;
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE,
+ MAE_MCDI_ENCAP_TYPE_NONE);
+ /* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the
+ * SW path needs to process the packet to update the conntrack tables
+ * on connection establishment (SYN) or termination (FIN, RST).
+ */
+ MCDI_POPULATE_DWORD_6(inbuf, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL,
+ MAE_OUTER_RULE_INSERT_IN_DO_CT, !!act->zone,
+ MAE_OUTER_RULE_INSERT_IN_CT_TCP_FLAGS_INHIBIT, 1,
+ MAE_OUTER_RULE_INSERT_IN_CT_DOMAIN,
+ act->zone ? act->zone->zone : 0,
+ MAE_OUTER_RULE_INSERT_IN_CT_VNI_MODE,
+ MAE_CT_VNI_MODE_ZERO,
+ MAE_OUTER_RULE_INSERT_IN_DO_COUNT, !!act->count,
+ MAE_OUTER_RULE_INSERT_IN_RECIRC_ID,
+ act->rid ? act->rid->fw_id : 0);
+ if (act->count)
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_COUNTER_ID,
+ act->count->cnt->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_INSERT, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ rule->fw_id = MCDI_DWORD(outbuf, MAE_OUTER_RULE_INSERT_OUT_OR_ID);
+ return 0;
+}
+
+int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
+ u32 prio)
+{
+ return efx_mae_insert_lhs_outer_rule(efx, rule, prio);
+}
+
+static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx,
+ struct efx_tc_lhs_rule *rule)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_OUTER_RULE_REMOVE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_REMOVE_IN_OR_ID, rule->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_OUTER_RULE_REMOVE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+	/* FW freed a different ID than we asked for; this should never happen.
+	 * Warn because it means we've now got a different idea to the FW of
+	 * what outer rules exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID) != rule->fw_id))
+ return -EIO;
+ /* We're probably about to free @rule, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ rule->fw_id = MC_CMD_MAE_OUTER_RULE_INSERT_OUT_OUTER_RULE_ID_NULL;
+ return 0;
+}
+
+int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule)
+{
+ return efx_mae_remove_lhs_outer_rule(efx, rule);
+}
+
+/* Populating is done by taking each byte of @value in turn and storing
+ * it in the appropriate bits of @row. @value must be big-endian; we
+ * convert it to little-endianness as we go.
+ */
+static int efx_mae_table_populate(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits,
+ void *value, size_t value_size)
+{
+ unsigned int i;
+
+ /* For now only scheme 0 is supported for any field, so we check here
+ * (rather than, say, in calling code, which knows the semantics and
+ * could in principle encode for other schemes).
+ */
+ if (field.scheme)
+ return -EOPNOTSUPP;
+ if (DIV_ROUND_UP(field.width, 8) != value_size)
+ return -EINVAL;
+ if (field.lbn + field.width > row_bits)
+ return -EINVAL;
+ for (i = 0; i < value_size; i++) {
+ unsigned int bn = field.lbn + i * 8;
+ unsigned int wn = bn / 32;
+ u64 v;
+
+ v = ((u8 *)value)[value_size - i - 1];
+ v <<= (bn % 32);
+ row[wn] |= cpu_to_le32(v & 0xffffffff);
+		if (wn * 32 + 32 < row_bits)
+ row[wn + 1] |= cpu_to_le32(v >> 32);
+ }
+ return 0;
+}
+
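
The populate loop above takes the bytes of a big-endian value, least significant first, and ORs each one into the right 32-bit word of the row, spilling into the next word where a byte straddles a word boundary. The same packing, reduced to plain uint32_t words so it can be compiled and run on its own:

/* Standalone demonstration of the packing scheme described above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int populate(unsigned int lbn, unsigned int width,
		    uint32_t *row, size_t row_bits,
		    const uint8_t *value, size_t value_size)
{
	size_t i;

	if ((width + 7) / 8 != value_size || lbn + width > row_bits)
		return -1;
	for (i = 0; i < value_size; i++) {
		unsigned int bn = lbn + i * 8;	/* bit position of this byte */
		unsigned int wn = bn / 32;	/* word holding that bit */
		uint64_t v = value[value_size - i - 1];	/* LSB first */

		v <<= (bn % 32);
		row[wn] |= (uint32_t)v;
		if (wn * 32 + 32 < row_bits)	/* spill into the next word */
			row[wn + 1] |= (uint32_t)(v >> 32);
	}
	return 0;
}

int main(void)
{
	uint32_t row[2] = { 0, 0 };		/* a 64-bit wide key row */
	uint8_t port_be[2] = { 0x12, 0x34 };	/* 0x1234, big-endian */

	populate(28, 16, row, 64, port_be, sizeof(port_be));
	printf("row = %08x %08x\n", (unsigned)row[1], (unsigned)row[0]);
	return 0;
}
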
+static int efx_mae_table_populate_bool(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits, bool value)
+{
+ u8 v = value ? 1 : 0;
+
+ if (field.width != 1)
+ return -EINVAL;
+ return efx_mae_table_populate(field, row, row_bits, &v, 1);
+}
+
+static int efx_mae_table_populate_ipv4(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits, __be32 value)
+{
+ /* IPv4 is placed in the first 4 bytes of an IPv6-sized field */
+ struct in6_addr v = {};
+
+ if (field.width != 128)
+ return -EINVAL;
+ v.s6_addr32[0] = value;
+ return efx_mae_table_populate(field, row, row_bits, &v, sizeof(v));
+}
+
+static int efx_mae_table_populate_u24(struct efx_tc_table_field_fmt field,
+ __le32 *row, size_t row_bits, u32 value)
+{
+ __be32 v = cpu_to_be32(value);
+
+ /* We adjust value_size here since just 3 bytes will be copied, and
+	 * the value pointer is advanced past the first byte, which is the
+	 * most significant byte of a big-endian 4-byte value.
+ */
+ return efx_mae_table_populate(field, row, row_bits, ((void *)&v) + 1,
+ sizeof(v) - 1);
+}
+
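
efx_mae_table_populate_u24() relies on the fact that skipping the most significant byte of a big-endian 32-bit value leaves the three payload bytes in place, still in big-endian order. A tiny standalone check of that byte layout:

/* Standalone check of the "skip the MSB of a __be32" trick. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl() */

int main(void)
{
	uint32_t be = htonl(0x00123456);	/* 24-bit value, big-endian */
	const uint8_t *p = (const uint8_t *)&be;

	printf("%02x %02x %02x\n", p[1], p[2], p[3]);	/* 12 34 56 */
	return 0;
}
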
+#define _TABLE_POPULATE(dst, dw, _field, _value) ({ \
+ typeof(_value) _v = _value; \
+ \
+ (_field.width == sizeof(_value) * 8) ? \
+ efx_mae_table_populate(_field, dst, dw, &_v, \
+ sizeof(_v)) : -EINVAL; \
+})
+#define TABLE_POPULATE_KEY_IPV4(dst, _table, _field, _value) \
+ efx_mae_table_populate_ipv4(efx->tc->meta_##_table.desc.keys \
+ [efx->tc->meta_##_table.keys._field##_idx],\
+ dst, efx->tc->meta_##_table.desc.key_width,\
+ _value)
+#define TABLE_POPULATE_KEY(dst, _table, _field, _value) \
+ _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.key_width, \
+ efx->tc->meta_##_table.desc.keys \
+ [efx->tc->meta_##_table.keys._field##_idx], \
+ _value)
+
+#define TABLE_POPULATE_RESP_BOOL(dst, _table, _field, _value) \
+ efx_mae_table_populate_bool(efx->tc->meta_##_table.desc.resps \
+ [efx->tc->meta_##_table.resps._field##_idx],\
+ dst, efx->tc->meta_##_table.desc.resp_width,\
+ _value)
+#define TABLE_POPULATE_RESP(dst, _table, _field, _value) \
+ _TABLE_POPULATE(dst, efx->tc->meta_##_table.desc.resp_width, \
+ efx->tc->meta_##_table.desc.resps \
+ [efx->tc->meta_##_table.resps._field##_idx], \
+ _value)
+
+#define TABLE_POPULATE_RESP_U24(dst, _table, _field, _value) \
+ efx_mae_table_populate_u24(efx->tc->meta_##_table.desc.resps \
+ [efx->tc->meta_##_table.resps._field##_idx],\
+ dst, efx->tc->meta_##_table.desc.resp_width,\
+ _value)
+
+static int efx_mae_populate_ct_key(struct efx_nic *efx, __le32 *key, size_t kw,
+ struct efx_tc_ct_entry *conn)
+{
+ bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6);
+ int rc;
+
+ rc = TABLE_POPULATE_KEY(key, ct, eth_proto, conn->eth_proto);
+ if (rc)
+ return rc;
+ rc = TABLE_POPULATE_KEY(key, ct, ip_proto, conn->ip_proto);
+ if (rc)
+ return rc;
+ if (ipv6)
+ rc = TABLE_POPULATE_KEY(key, ct, src_ip, conn->src_ip6);
+ else
+ rc = TABLE_POPULATE_KEY_IPV4(key, ct, src_ip, conn->src_ip);
+ if (rc)
+ return rc;
+ if (ipv6)
+ rc = TABLE_POPULATE_KEY(key, ct, dst_ip, conn->dst_ip6);
+ else
+ rc = TABLE_POPULATE_KEY_IPV4(key, ct, dst_ip, conn->dst_ip);
+ if (rc)
+ return rc;
+ rc = TABLE_POPULATE_KEY(key, ct, l4_sport, conn->l4_sport);
+ if (rc)
+ return rc;
+ rc = TABLE_POPULATE_KEY(key, ct, l4_dport, conn->l4_dport);
+ if (rc)
+ return rc;
+ return TABLE_POPULATE_KEY(key, ct, zone, cpu_to_be16(conn->zone->zone));
+}
+
+int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ bool ipv6 = conn->eth_proto == htons(ETH_P_IPV6);
+ __le32 *key = NULL, *resp = NULL;
+ size_t inlen, kw, rw;
+ efx_dword_t *inbuf;
+ int rc = -ENOMEM;
+
+ /* Check table access is supported */
+ if (!efx->tc->meta_ct.hooked)
+ return -EOPNOTSUPP;
+
+ /* key/resp widths are in bits; convert to dwords for IN_LEN */
+ kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32);
+ rw = DIV_ROUND_UP(efx->tc->meta_ct.desc.resp_width, 32);
+ BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_INSERT_IN_DATA_LEN);
+ inlen = MC_CMD_TABLE_INSERT_IN_LEN(kw + rw);
+ if (inlen > MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2)
+ return -E2BIG;
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
+ key = kcalloc(kw, sizeof(__le32), GFP_KERNEL);
+ if (!key)
+ goto out_free;
+ resp = kcalloc(rw, sizeof(__le32), GFP_KERNEL);
+ if (!resp)
+ goto out_free;
+
+ rc = efx_mae_populate_ct_key(efx, key, kw, conn);
+ if (rc)
+ goto out_free;
+
+ rc = TABLE_POPULATE_RESP_BOOL(resp, ct, dnat, conn->dnat);
+ if (rc)
+ goto out_free;
+ /* No support in hw for IPv6 NAT; field is only 32 bits */
+ if (!ipv6)
+ rc = TABLE_POPULATE_RESP(resp, ct, nat_ip, conn->nat_ip);
+ if (rc)
+ goto out_free;
+ rc = TABLE_POPULATE_RESP(resp, ct, l4_natport, conn->l4_natport);
+ if (rc)
+ goto out_free;
+ rc = TABLE_POPULATE_RESP(resp, ct, mark, cpu_to_be32(conn->mark));
+ if (rc)
+ goto out_free;
+ rc = TABLE_POPULATE_RESP_U24(resp, ct, counter_id, conn->cnt->fw_id);
+ if (rc)
+ goto out_free;
+
+ MCDI_SET_DWORD(inbuf, TABLE_INSERT_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE);
+ MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_KEY_WIDTH,
+ efx->tc->meta_ct.desc.key_width);
+ /* MASK_WIDTH is zero as CT is a BCAM */
+ MCDI_SET_WORD(inbuf, TABLE_INSERT_IN_RESP_WIDTH,
+ efx->tc->meta_ct.desc.resp_width);
+ memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA), key, kw * sizeof(__le32));
+ memcpy(MCDI_PTR(inbuf, TABLE_INSERT_IN_DATA) + kw * sizeof(__le32),
+ resp, rw * sizeof(__le32));
+
+ BUILD_BUG_ON(MC_CMD_TABLE_INSERT_OUT_LEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_INSERT, inbuf, inlen, NULL, 0, NULL);
+
+out_free:
+ kfree(resp);
+ kfree(key);
+ kfree(inbuf);
+ return rc;
+}
+
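
efx_mae_insert_ct() above builds the TABLE_INSERT payload as the key, padded up to whole dwords, followed by the response, also padded; the widths themselves are carried in bits, and the mask width is zero because the conntrack table is a BCAM. A standalone sketch of that layout arithmetic (the widths below are invented):

/* Standalone sketch of the key-then-response payload layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int key_width = 301;		/* bits, per the descriptor */
	unsigned int resp_width = 90;		/* bits */
	size_t kw = DIV_ROUND_UP(key_width, 32);
	size_t rw = DIV_ROUND_UP(resp_width, 32);
	uint32_t key[16] = { 0 }, resp[16] = { 0 };
	uint32_t data[32] = { 0 };		/* stands in for ..._IN_DATA */

	memcpy(data, key, kw * sizeof(uint32_t));	/* key first */
	memcpy(data + kw, resp, rw * sizeof(uint32_t));	/* then response */
	printf("key %zu dwords + resp %zu dwords = %zu data dwords\n",
	       kw, rw, kw + rw);
	return 0;
}
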
+int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ __le32 *key = NULL;
+ efx_dword_t *inbuf;
+ size_t inlen, kw;
+ int rc = -ENOMEM;
+
+ /* Check table access is supported */
+ if (!efx->tc->meta_ct.hooked)
+ return -EOPNOTSUPP;
+
+ /* key width is in bits; convert to dwords for IN_LEN */
+ kw = DIV_ROUND_UP(efx->tc->meta_ct.desc.key_width, 32);
+ BUILD_BUG_ON(sizeof(__le32) != MC_CMD_TABLE_DELETE_IN_DATA_LEN);
+ inlen = MC_CMD_TABLE_DELETE_IN_LEN(kw);
+ if (inlen > MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2)
+ return -E2BIG;
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
+ key = kcalloc(kw, sizeof(__le32), GFP_KERNEL);
+ if (!key)
+ goto out_free;
+
+ rc = efx_mae_populate_ct_key(efx, key, kw, conn);
+ if (rc)
+ goto out_free;
+
+ MCDI_SET_DWORD(inbuf, TABLE_DELETE_IN_TABLE_ID, TABLE_ID_CONNTRACK_TABLE);
+ MCDI_SET_WORD(inbuf, TABLE_DELETE_IN_KEY_WIDTH,
+ efx->tc->meta_ct.desc.key_width);
+ /* MASK_WIDTH is zero as CT is a BCAM */
+ /* RESP_WIDTH is zero for DELETE */
+ memcpy(MCDI_PTR(inbuf, TABLE_DELETE_IN_DATA), key, kw * sizeof(__le32));
+
+ BUILD_BUG_ON(MC_CMD_TABLE_DELETE_OUT_LEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_TABLE_DELETE, inbuf, inlen, NULL, 0, NULL);
+
+out_free:
+ kfree(key);
+ kfree(inbuf);
+ return rc;
+}
+
static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
const struct efx_tc_match *match)
{
@@ -1165,20 +2041,40 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
- EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS),
+ EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS),
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT,
+ match->value.ct_state_trk,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT,
+ match->value.ct_state_est,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
match->value.ip_frag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
- match->value.ip_firstfrag);
- EFX_POPULATE_DWORD_2(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK),
+ match->value.ip_firstfrag,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST,
+ match->value.tcp_syn_fin_rst);
+ EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_FLAGS_MASK),
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_DO_CT,
+ match->mask.ct_state_trk,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_HIT,
+ match->mask.ct_state_est,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IS_IP_FRAG,
match->mask.ip_frag,
MAE_FIELD_MASK_VALUE_PAIRS_V2_IP_FIRST_FRAG,
- match->mask.ip_firstfrag);
+ match->mask.ip_firstfrag,
+ MAE_FIELD_MASK_VALUE_PAIRS_V2_TCP_SYN_FIN_RST,
+ match->mask.tcp_syn_fin_rst);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
match->value.recirc_id);
MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
match->mask.recirc_id);
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK,
+ match->value.ct_mark);
+ MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_MARK_MASK,
+ match->mask.ct_mark);
+ MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN,
+ match->value.ct_zone);
+ MCDI_STRUCT_SET_WORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_CT_DOMAIN_MASK,
+ match->mask.ct_zone);
MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE,
match->value.eth_proto);
MCDI_STRUCT_SET_WORD_BE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_ETHER_TYPE_BE_MASK,
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 24abfe509690..8df30bc4f3ba 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -66,6 +66,9 @@ int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
void efx_mae_counters_grant_credits(struct work_struct *work);
+int efx_mae_get_tables(struct efx_nic *efx);
+void efx_mae_free_tables(struct efx_nic *efx);
+
#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
struct mae_caps {
@@ -81,6 +84,9 @@ int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
int efx_mae_match_check_caps(struct efx_nic *efx,
const struct efx_tc_match_fields *mask,
struct netlink_ext_ack *extack);
+int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack);
int efx_mae_check_encap_match_caps(struct efx_nic *efx, bool ipv6,
u8 ip_tos_mask, __be16 udp_sport_mask,
struct netlink_ext_ack *extack);
@@ -97,6 +103,10 @@ int efx_mae_update_encap_md(struct efx_nic *efx,
int efx_mae_free_encap_md(struct efx_nic *efx,
struct efx_tc_encap_action *encap);
+int efx_mae_allocate_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped);
+void efx_mae_free_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped);
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
@@ -109,6 +119,12 @@ int efx_mae_register_encap_match(struct efx_nic *efx,
struct efx_tc_encap_match *encap);
int efx_mae_unregister_encap_match(struct efx_nic *efx,
struct efx_tc_encap_match *encap);
+int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
+ u32 prio);
+int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule);
+struct efx_tc_ct_entry; /* see tc_conntrack.h */
+int efx_mae_insert_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn);
+int efx_mae_remove_ct(struct efx_nic *efx, struct efx_tc_ct_entry *conn);
int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index a7f2c31071e8..d23da9627338 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -10,7 +10,6 @@
#include "net_driver.h"
#include "nic.h"
#include "io.h"
-#include "farch_regs.h"
#include "mcdi_pcol.h"
/**************************************************************************
@@ -1353,12 +1352,6 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_MAC_STATS_DMA:
 		/* MAC stats are gathered lazily. We can ignore this. */
break;
- case MCDI_EVENT_CODE_FLR:
- if (efx->type->sriov_flr)
- efx->type->sriov_flr(efx,
- MCDI_EVENT_FIELD(*event, FLR_VF));
- break;
- case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 454e9d51a4c2..ea612c619874 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -218,14 +218,28 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
BUILD_BUG_ON(_field ## _LEN != 1); \
*(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \
} while (0)
+#define MCDI_STRUCT_POPULATE_BYTE_1(_buf, _field, _name, _value) do { \
+ efx_dword_t _temp; \
+ EFX_POPULATE_DWORD_1(_temp, _name, _value); \
+ MCDI_STRUCT_SET_BYTE(_buf, _field, \
+ EFX_DWORD_FIELD(_temp, EFX_BYTE_0)); \
+ } while (0)
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
+#define MCDI_STRUCT_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 1), \
+ *MCDI_STRUCT_PTR(_buf, _field))
#define MCDI_SET_WORD(_buf, _field, _value) do { \
BUILD_BUG_ON(MC_CMD_ ## _field ## _LEN != 2); \
BUILD_BUG_ON(MC_CMD_ ## _field ## _OFST & 1); \
*(__force __le16 *)MCDI_PTR(_buf, _field) = cpu_to_le16(_value);\
} while (0)
+#define MCDI_STRUCT_SET_WORD(_buf, _field, _value) do { \
+ BUILD_BUG_ON(_field ## _LEN != 2); \
+ BUILD_BUG_ON(_field ## _OFST & 1); \
+ *(__force __le16 *)MCDI_STRUCT_PTR(_buf, _field) = cpu_to_le16(_value);\
+ } while (0)
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
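
MCDI_STRUCT_SET_WORD() added above stores a 16-bit value little-endian at the field's byte offset within an MCDI structure, after compile-time checks on the field's length and alignment. A standalone model of the store itself (the offset and buffer are invented):

/* Standalone model of a little-endian 16-bit store at a byte offset. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void set_word_le(uint8_t *buf, size_t ofst, uint16_t value)
{
	uint8_t le[2] = { value & 0xff, value >> 8 };	/* little-endian */

	memcpy(buf + ofst, le, sizeof(le));
}

int main(void)
{
	uint8_t buf[8] = { 0 };

	set_word_le(buf, 2, 0xBEEF);		/* field at byte offset 2 */
	printf("%02x %02x\n", buf[2], buf[3]);	/* ef be */
	return 0;
}
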
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
index d3e6d8239f5c..ff8424167384 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.c
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -62,7 +62,7 @@ int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
int efx_mcdi_ev_probe(struct efx_channel *channel)
{
- return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq,
(channel->eventq_mask + 1) *
sizeof(efx_qword_t),
GFP_KERNEL);
@@ -74,14 +74,14 @@ int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
EFX_BUF_SIZE));
MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
- size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ size_t entries = channel->eventq.len / EFX_BUF_SIZE;
struct efx_nic *efx = channel->efx;
size_t inlen, outlen;
dma_addr_t dma_addr;
int rc, i;
/* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+ memset(channel->eventq.addr, 0xff, channel->eventq.len);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
@@ -112,7 +112,7 @@ int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
}
- dma_addr = channel->eventq.buf.dma_addr;
+ dma_addr = channel->eventq.dma_addr;
for (i = 0; i < entries; ++i) {
MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
dma_addr += EFX_BUF_SIZE;
@@ -134,7 +134,7 @@ int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
void efx_mcdi_ev_remove(struct efx_channel *channel)
{
- efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+ efx_nic_free_buffer(channel->efx, &channel->eventq);
}
void efx_mcdi_ev_fini(struct efx_channel *channel)
@@ -166,7 +166,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
EFX_BUF_SIZE));
bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
- size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ size_t entries = tx_queue->txd.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
dma_addr_t dma_addr;
@@ -182,7 +182,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
- dma_addr = tx_queue->txd.buf.dma_addr;
+ dma_addr = tx_queue->txd.dma_addr;
netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
tx_queue->queue, entries, (u64)dma_addr);
@@ -240,7 +240,7 @@ fail:
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
- efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd);
}
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
@@ -269,7 +269,7 @@ fail:
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
- return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd,
(rx_queue->ptr_mask + 1) *
sizeof(efx_qword_t),
GFP_KERNEL);
@@ -278,7 +278,7 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ size_t entries = rx_queue->rxd.len / EFX_BUF_SIZE;
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
struct efx_nic *efx = rx_queue->efx;
unsigned int buffer_size;
@@ -306,7 +306,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);
- dma_addr = rx_queue->rxd.buf.dma_addr;
+ dma_addr = rx_queue->rxd.dma_addr;
netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
@@ -325,7 +325,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
- efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd);
}
void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 0ab14f3d01d4..76ea26722ca4 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -1106,11 +1106,6 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx));
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* Set simple MAC filter for Siena */
- MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
- SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
-
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
SET_MAC_IN_FLAG_INCLUDE_FCS,
!!(efx->net_dev->features & NETIF_F_RXFCS));
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index a7a22b019794..27d86e90a3bb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -67,9 +67,7 @@
#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OUTER_CSUM 1 /* Outer checksum offload */
#define EFX_TXQ_TYPE_INNER_CSUM 2 /* Inner checksum offload */
-#define EFX_TXQ_TYPE_HIGHPRI 4 /* High-priority (for TC) */
-#define EFX_TXQ_TYPES 8
-/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */
+#define EFX_TXQ_TYPES 4
#define EFX_MAX_TXQ_PER_CHANNEL 4
#define EFX_MAX_TX_QUEUES (EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS)
@@ -125,26 +123,6 @@ struct efx_buffer {
};
/**
- * struct efx_special_buffer - DMA buffer entered into buffer table
- * @buf: Standard &struct efx_buffer
- * @index: Buffer index within controller;s buffer table
- * @entries: Number of buffer table entries
- *
- * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
- * Event and descriptor rings are addressed via one or more buffer
- * table entries (and so can be physically non-contiguous, although we
- * currently do not take advantage of that). On Falcon and Siena we
- * have to take care of allocating and initialising the entries
- * ourselves. On later hardware this is managed by the firmware and
- * @index and @entries are left as 0.
- */
-struct efx_special_buffer {
- struct efx_buffer buf;
- unsigned int index;
- unsigned int entries;
-};
-
-/**
* struct efx_tx_buffer - buffer state for a TX descriptor
* @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
* freed when descriptor completes
@@ -237,7 +215,7 @@ struct efx_tx_buffer {
* Normally this will equal @write_count, but as option descriptors
* don't produce completion events, they won't update this.
* Filled in iff @efx->type->option_descriptors; only used for PIO.
- * Thus, this is written and used on EF10, and neither on farch.
+ * Thus, this is only written and used on EF10.
* @old_read_count: The value of read_count when last checked.
* This is here for performance reasons. The xmit path will
* only get the up-to-date value of read_count if this
@@ -270,7 +248,7 @@ struct efx_tx_queue {
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
struct efx_buffer *cb_page;
- struct efx_special_buffer txd;
+ struct efx_buffer txd;
unsigned int ptr_mask;
void __iomem *piobuf;
unsigned int piobuf_offset;
@@ -399,7 +377,7 @@ struct efx_rx_queue {
struct efx_nic *efx;
int core_index;
struct efx_rx_buffer *buffer;
- struct efx_special_buffer rxd;
+ struct efx_buffer rxd;
unsigned int ptr_mask;
bool refill_enabled;
bool flush_pending;
@@ -515,7 +493,7 @@ struct efx_channel {
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long busy_poll_state;
#endif
- struct efx_special_buffer eventq;
+ struct efx_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
int event_test_cpu;
@@ -754,18 +732,6 @@ struct efx_hw_stat_desc {
u16 offset;
};
-/* Number of bits used in a multicast filter hash address */
-#define EFX_MCAST_HASH_BITS 8
-
-/* Number of (single-bit) entries in a multicast filter hash */
-#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
-
-/* An Efx multicast filter hash */
-union efx_multicast_hash {
- u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
- efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
-};
-
struct vfdi_status;
/* The reserved RSS context value */
@@ -895,7 +861,6 @@ struct efx_mae;
* @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
* @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
* @sram_lim_qw: Qword address limit of SRAM
- * @next_buffer_table: First available buffer table id
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
@@ -957,10 +922,6 @@ struct efx_mae;
* see &enum ethtool_fec_config_bits.
* @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state
- * @unicast_filter: Flag for Falcon-arch simple unicast filter.
- * Protected by @mac_lock.
- * @multicast_hash: Multicast hash table for Falcon-arch.
- * Protected by @mac_lock.
* @wanted_fc: Wanted flow control flags
* @fc_disable: When non-zero flow control is disabled. Typically used to
* ensure that network back pressure doesn't delay dma queue flushes.
@@ -1064,7 +1025,6 @@ struct efx_nic {
unsigned tx_dc_base;
unsigned rx_dc_base;
unsigned sram_lim_qw;
- unsigned next_buffer_table;
unsigned int max_channels;
unsigned int max_vis;
@@ -1139,8 +1099,6 @@ struct efx_nic {
struct efx_link_state link_state;
unsigned int n_link_state_changes;
- bool unicast_filter;
- union efx_multicast_hash multicast_hash;
u8 wanted_fc;
unsigned fc_disable;
@@ -1263,10 +1221,6 @@ struct efx_udp_tunnel {
* @remove_port: Free resources allocated by probe_port()
* @handle_global_event: Handle a "global" event (may be %NULL)
* @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
- * @prepare_flush: Prepare the hardware for flushing the DMA queues
- * (for Falcon architecture)
- * @finish_flush: Clean up after flushing the DMA queues (for Falcon
- * architecture)
* @prepare_flr: Prepare for an FLR
* @finish_flr: Clean up after an FLR
* @describe_stats: Describe statistics for ethtool
@@ -1288,8 +1242,7 @@ struct efx_udp_tunnel {
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
* @get_fec_stats: Get standard FEC statistics.
- * @test_chip: Test registers. May use efx_farch_test_registers(), and is
- * expected to reset the NIC.
+ * @test_chip: Test registers. This is expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
* @mcdi_request: Send an MCDI request with the given header and SDU.
* The SDU length may be any value from 0 up to the protocol-
@@ -1414,8 +1367,6 @@ struct efx_nic_type {
void (*remove_port)(struct efx_nic *efx);
bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
int (*fini_dmaq)(struct efx_nic *efx);
- void (*prepare_flush)(struct efx_nic *efx);
- void (*finish_flush)(struct efx_nic *efx);
void (*prepare_flr)(struct efx_nic *efx);
void (*finish_flr)(struct efx_nic *efx);
size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
@@ -1531,8 +1482,6 @@ struct efx_nic_type {
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
- void (*sriov_reset)(struct efx_nic *efx);
- void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac);
int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
u8 qos);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 63e2394382bb..a33ed473cc8a 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -17,7 +17,6 @@
#include "efx.h"
#include "nic.h"
#include "ef10_regs.h"
-#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi_pcol.h"
@@ -172,10 +171,6 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Register dump */
-#define REGISTER_REVISION_FA 1
-#define REGISTER_REVISION_FB 2
-#define REGISTER_REVISION_FC 3
-#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
#define REGISTER_REVISION_ED 4
#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
@@ -189,117 +184,9 @@ struct efx_nic_reg {
REGISTER_REVISION_ ## arch ## min_rev, \
REGISTER_REVISION_ ## arch ## max_rev \
}
-#define REGISTER_AA(name) REGISTER(name, F, A, A)
-#define REGISTER_AB(name) REGISTER(name, F, A, B)
-#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
-#define REGISTER_BB(name) REGISTER(name, F, B, B)
-#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
static const struct efx_nic_reg efx_nic_regs[] = {
- REGISTER_AZ(ADR_REGION),
- REGISTER_AZ(INT_EN_KER),
- REGISTER_BZ(INT_EN_CHAR),
- REGISTER_AZ(INT_ADR_KER),
- REGISTER_BZ(INT_ADR_CHAR),
- /* INT_ACK_KER is WO */
- /* INT_ISR0 is RC */
- REGISTER_AZ(HW_INIT),
- REGISTER_CZ(USR_EV_CFG),
- REGISTER_AB(EE_SPI_HCMD),
- REGISTER_AB(EE_SPI_HADR),
- REGISTER_AB(EE_SPI_HDATA),
- REGISTER_AB(EE_BASE_PAGE),
- REGISTER_AB(EE_VPD_CFG0),
- /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
- /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
- /* PCIE_CORE_INDIRECT is indirect */
- REGISTER_AB(NIC_STAT),
- REGISTER_AB(GPIO_CTL),
- REGISTER_AB(GLB_CTL),
- /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
- REGISTER_BZ(DP_CTRL),
- REGISTER_AZ(MEM_STAT),
- REGISTER_AZ(CS_DEBUG),
- REGISTER_AZ(ALTERA_BUILD),
- REGISTER_AZ(CSR_SPARE),
- REGISTER_AB(PCIE_SD_CTL0123),
- REGISTER_AB(PCIE_SD_CTL45),
- REGISTER_AB(PCIE_PCS_CTL_STAT),
- /* DEBUG_DATA_OUT is not used */
- /* DRV_EV is WO */
- REGISTER_AZ(EVQ_CTL),
- REGISTER_AZ(EVQ_CNT1),
- REGISTER_AZ(EVQ_CNT2),
- REGISTER_AZ(BUF_TBL_CFG),
- REGISTER_AZ(SRM_RX_DC_CFG),
- REGISTER_AZ(SRM_TX_DC_CFG),
- REGISTER_AZ(SRM_CFG),
- /* BUF_TBL_UPD is WO */
- REGISTER_AZ(SRM_UPD_EVQ),
- REGISTER_AZ(SRAM_PARITY),
- REGISTER_AZ(RX_CFG),
- REGISTER_BZ(RX_FILTER_CTL),
- /* RX_FLUSH_DESCQ is WO */
- REGISTER_AZ(RX_DC_CFG),
- REGISTER_AZ(RX_DC_PF_WM),
- REGISTER_BZ(RX_RSS_TKEY),
- /* RX_NODESC_DROP is RC */
- REGISTER_AA(RX_SELF_RST),
- /* RX_DEBUG, RX_PUSH_DROP are not used */
- REGISTER_CZ(RX_RSS_IPV6_REG1),
- REGISTER_CZ(RX_RSS_IPV6_REG2),
- REGISTER_CZ(RX_RSS_IPV6_REG3),
- /* TX_FLUSH_DESCQ is WO */
- REGISTER_AZ(TX_DC_CFG),
- REGISTER_AA(TX_CHKSM_CFG),
- REGISTER_AZ(TX_CFG),
- /* TX_PUSH_DROP is not used */
- REGISTER_AZ(TX_RESERVED),
- REGISTER_BZ(TX_PACE),
- /* TX_PACE_DROP_QID is RC */
- REGISTER_BB(TX_VLAN),
- REGISTER_BZ(TX_IPFIL_PORTEN),
- REGISTER_AB(MD_TXD),
- REGISTER_AB(MD_RXD),
- REGISTER_AB(MD_CS),
- REGISTER_AB(MD_PHY_ADR),
- REGISTER_AB(MD_ID),
- /* MD_STAT is RC */
- REGISTER_AB(MAC_STAT_DMA),
- REGISTER_AB(MAC_CTRL),
- REGISTER_BB(GEN_MODE),
- REGISTER_AB(MAC_MC_HASH_REG0),
- REGISTER_AB(MAC_MC_HASH_REG1),
- REGISTER_AB(GM_CFG1),
- REGISTER_AB(GM_CFG2),
- /* GM_IPG and GM_HD are not used */
- REGISTER_AB(GM_MAX_FLEN),
- /* GM_TEST is not used */
- REGISTER_AB(GM_ADR1),
- REGISTER_AB(GM_ADR2),
- REGISTER_AB(GMF_CFG0),
- REGISTER_AB(GMF_CFG1),
- REGISTER_AB(GMF_CFG2),
- REGISTER_AB(GMF_CFG3),
- REGISTER_AB(GMF_CFG4),
- REGISTER_AB(GMF_CFG5),
- REGISTER_BB(TX_SRC_MAC_CTL),
- REGISTER_AB(XM_ADR_LO),
- REGISTER_AB(XM_ADR_HI),
- REGISTER_AB(XM_GLB_CFG),
- REGISTER_AB(XM_TX_CFG),
- REGISTER_AB(XM_RX_CFG),
- REGISTER_AB(XM_MGT_INT_MASK),
- REGISTER_AB(XM_FC),
- REGISTER_AB(XM_PAUSE_TIME),
- REGISTER_AB(XM_TX_PARAM),
- REGISTER_AB(XM_RX_PARAM),
- /* XM_MGT_INT_MSK (note no 'A') is RC */
- REGISTER_AB(XX_PWR_RST),
- REGISTER_AB(XX_SD_CTL),
- REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
REGISTER_DZ(BIU_HW_REV_ID),
@@ -325,49 +212,9 @@ struct efx_nic_reg_table {
arch, min_rev, max_rev, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
-#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
-#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
-#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
-#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
-#define REGISTER_TABLE_BB_CZ(name) \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
- FR_BZ_ ## name ## _STEP, \
- FR_BB_ ## name ## _ROWS), \
- REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
- FR_BZ_ ## name ## _STEP, \
- FR_CZ_ ## name ## _ROWS)
-#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
- /* DRIVER is not used */
- /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
- REGISTER_TABLE_BB(TX_IPFIL_TBL),
- REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
- REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
- REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
- REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
- REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
- REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
- REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
- /* We can't reasonably read all of the buffer table (up to 8MB!).
- * However this driver will only use a few entries. Reading
- * 1K entries allows for some expansion of queue count and
- * size before we need to change the version. */
- REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
- F, A, A, 8, 1024),
- REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
- F, B, Z, 8, 1024),
- REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
- REGISTER_TABLE_BB_CZ(TIMER_TBL),
- REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
- REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
- /* TX_FILTER_TBL0 is huge and not used by this driver */
- REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
- REGISTER_TABLE_CZ(MC_TREG_SMEM),
- /* MSIX_PBA_TABLE is not mapped */
- /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
- REGISTER_TABLE_BZ(RX_FILTER_TBL0),
REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};
@@ -425,11 +272,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
case 4: /* 32-bit SRAM */
efx_readd(efx, buf, table->offset + 4 * i);
break;
- case 8: /* 64-bit SRAM */
- efx_sram_readq(efx,
- efx->membase + table->offset,
- buf, i);
- break;
case 16: /* 128-bit-readable register */
efx_reado_table(efx, buf, table->offset, i);
break;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 251868235ae4..1db64fc6e909 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -11,8 +11,6 @@
#include "nic_common.h"
#include "efx.h"
-u32 efx_farch_fpga_ver(struct efx_nic *efx);
-
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
@@ -26,97 +24,6 @@ enum {
};
enum {
- SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
- SIENA_STAT_tx_good_bytes,
- SIENA_STAT_tx_bad_bytes,
- SIENA_STAT_tx_packets,
- SIENA_STAT_tx_bad,
- SIENA_STAT_tx_pause,
- SIENA_STAT_tx_control,
- SIENA_STAT_tx_unicast,
- SIENA_STAT_tx_multicast,
- SIENA_STAT_tx_broadcast,
- SIENA_STAT_tx_lt64,
- SIENA_STAT_tx_64,
- SIENA_STAT_tx_65_to_127,
- SIENA_STAT_tx_128_to_255,
- SIENA_STAT_tx_256_to_511,
- SIENA_STAT_tx_512_to_1023,
- SIENA_STAT_tx_1024_to_15xx,
- SIENA_STAT_tx_15xx_to_jumbo,
- SIENA_STAT_tx_gtjumbo,
- SIENA_STAT_tx_collision,
- SIENA_STAT_tx_single_collision,
- SIENA_STAT_tx_multiple_collision,
- SIENA_STAT_tx_excessive_collision,
- SIENA_STAT_tx_deferred,
- SIENA_STAT_tx_late_collision,
- SIENA_STAT_tx_excessive_deferred,
- SIENA_STAT_tx_non_tcpudp,
- SIENA_STAT_tx_mac_src_error,
- SIENA_STAT_tx_ip_src_error,
- SIENA_STAT_rx_bytes,
- SIENA_STAT_rx_good_bytes,
- SIENA_STAT_rx_bad_bytes,
- SIENA_STAT_rx_packets,
- SIENA_STAT_rx_good,
- SIENA_STAT_rx_bad,
- SIENA_STAT_rx_pause,
- SIENA_STAT_rx_control,
- SIENA_STAT_rx_unicast,
- SIENA_STAT_rx_multicast,
- SIENA_STAT_rx_broadcast,
- SIENA_STAT_rx_lt64,
- SIENA_STAT_rx_64,
- SIENA_STAT_rx_65_to_127,
- SIENA_STAT_rx_128_to_255,
- SIENA_STAT_rx_256_to_511,
- SIENA_STAT_rx_512_to_1023,
- SIENA_STAT_rx_1024_to_15xx,
- SIENA_STAT_rx_15xx_to_jumbo,
- SIENA_STAT_rx_gtjumbo,
- SIENA_STAT_rx_bad_gtjumbo,
- SIENA_STAT_rx_overflow,
- SIENA_STAT_rx_false_carrier,
- SIENA_STAT_rx_symbol_error,
- SIENA_STAT_rx_align_error,
- SIENA_STAT_rx_length_error,
- SIENA_STAT_rx_internal_error,
- SIENA_STAT_rx_nodesc_drop_cnt,
- SIENA_STAT_COUNT
-};
-
-/**
- * struct siena_nic_data - Siena NIC state
- * @efx: Pointer back to main interface structure
- * @wol_filter_id: Wake-on-LAN packet filter id
- * @stats: Hardware statistics
- * @vf: Array of &struct siena_vf objects
- * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
- * @vfdi_status: Common VFDI status page to be dmad to VF address space.
- * @local_addr_list: List of local addresses. Protected by %local_lock.
- * @local_page_list: List of DMA addressable pages used to broadcast
- * %local_addr_list. Protected by %local_lock.
- * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
- * @peer_work: Work item to broadcast peer addresses to VMs.
- */
-struct siena_nic_data {
- struct efx_nic *efx;
- int wol_filter_id;
- u64 stats[SIENA_STAT_COUNT];
-#ifdef CONFIG_SFC_SRIOV
- struct siena_vf *vf;
- struct efx_channel *vfdi_channel;
- unsigned vf_buftbl_base;
- struct efx_buffer vfdi_status;
- struct list_head local_addr_list;
- struct list_head local_page_list;
- struct mutex local_lock;
- struct work_struct peer_work;
-#endif
-};
-
-enum {
EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
EF10_STAT_port_tx_packets,
EF10_STAT_port_tx_pause,
@@ -304,89 +211,4 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
-int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
-
-/* Falcon/Siena queue operations */
-int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr, unsigned int len);
-int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-int efx_farch_ev_probe(struct efx_channel *channel);
-int efx_farch_ev_init(struct efx_channel *channel);
-void efx_farch_ev_fini(struct efx_channel *channel);
-void efx_farch_ev_remove(struct efx_channel *channel);
-int efx_farch_ev_process(struct efx_channel *channel, int quota);
-void efx_farch_ev_read_ack(struct efx_channel *channel);
-void efx_farch_ev_test_generate(struct efx_channel *channel);
-
-/* Falcon/Siena filter operations */
-int efx_farch_filter_table_probe(struct efx_nic *efx);
-void efx_farch_filter_table_restore(struct efx_nic *efx);
-void efx_farch_filter_table_remove(struct efx_nic *efx);
-void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace);
-int efx_farch_filter_remove_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-int efx_farch_filter_get_safe(struct efx_nic *efx,
- enum efx_filter_priority priority, u32 filter_id,
- struct efx_filter_spec *);
-int efx_farch_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority, u32 *buf,
- u32 size);
-#ifdef CONFIG_RFS_ACCEL
-bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
- unsigned int index);
-#endif
-void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
-
-/* Falcon/Siena interrupts */
-void efx_farch_irq_enable_master(struct efx_nic *efx);
-int efx_farch_irq_test_generate(struct efx_nic *efx);
-void efx_farch_irq_disable_master(struct efx_nic *efx);
-irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
-
-/* Global Resources */
-void siena_prepare_flush(struct efx_nic *efx);
-int efx_farch_fini_dmaq(struct efx_nic *efx);
-void efx_farch_finish_flr(struct efx_nic *efx);
-void siena_finish_flush(struct efx_nic *efx);
-void falcon_start_nic_stats(struct efx_nic *efx);
-void falcon_stop_nic_stats(struct efx_nic *efx);
-int falcon_reset_xaui(struct efx_nic *efx);
-void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-void efx_farch_init_common(struct efx_nic *efx);
-void efx_farch_rx_push_indir_table(struct efx_nic *efx);
-void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
-
-/* Tests */
-struct efx_farch_register_test {
- unsigned address;
- efx_oword_t mask;
-};
-
-int efx_farch_test_registers(struct efx_nic *efx,
- const struct efx_farch_register_test *regs,
- size_t n_regs);
-
-void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
-
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 0cef35c0c559..466df5348b29 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -15,11 +15,10 @@
#include "ptp.h"
enum {
- /* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
+ /* Revisions 0-3 were Falcon A0, A1, B0 and Siena respectively.
* They are not supported by this driver but these revision numbers
* form part of the ethtool API for register dumping.
*/
- EFX_REV_SIENA_A0 = 3,
EFX_REV_HUNT_A0 = 4,
EFX_REV_EF100 = 5,
};
@@ -33,7 +32,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
- return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+ return ((efx_qword_t *)(channel->eventq.addr)) +
(index & channel->eventq_mask);
}
@@ -59,7 +58,7 @@ static inline int efx_event_present(efx_qword_t *event)
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
- return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+ return ((efx_qword_t *)(tx_queue->txd.addr)) + index;
}
/* Report whether this TX queue would be empty for the given write_count.
@@ -80,9 +79,7 @@ int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
- * descriptor to an empty queue, but is otherwise pointless. Further,
- * Falcon and Siena have hardware bugs (SF bug 33851) that may be
- * triggered if we don't check this.
+ * descriptor to an empty queue, but is otherwise pointless.
* We use the write_count used for the last doorbell push, to get the
* NIC's view of the tx queue.
*/
@@ -99,7 +96,7 @@ static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
- return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+ return ((efx_qword_t *)(rx_queue->rxd.addr)) + index;
}
/* Alignment of PCIe DMA boundaries (4KB) */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 00cf6de3bb2b..f54200f03e15 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -43,7 +43,6 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "io.h"
-#include "farch_regs.h"
#include "tx.h"
#include "nic.h" /* indirectly includes ptp.h */
#include "efx_channels.h"
@@ -87,9 +86,6 @@
#define PTP_V1_VERSION_LENGTH 2
#define PTP_V1_VERSION_OFFSET 28
-#define PTP_V1_UUID_LENGTH 6
-#define PTP_V1_UUID_OFFSET 50
-
#define PTP_V1_SEQUENCE_LENGTH 2
#define PTP_V1_SEQUENCE_OFFSET 58
@@ -101,17 +97,6 @@
#define PTP_V2_VERSION_LENGTH 1
#define PTP_V2_VERSION_OFFSET 29
-#define PTP_V2_UUID_LENGTH 8
-#define PTP_V2_UUID_OFFSET 48
-
-/* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2),
- * the MC only captures the last six bytes of the clock identity. These values
- * reflect those, not the ones used in the standard. The standard permits
- * mapping of V1 UUIDs to V2 UUIDs with these same values.
- */
-#define PTP_V2_MC_UUID_LENGTH 6
-#define PTP_V2_MC_UUID_OFFSET 50
-
#define PTP_V2_SEQUENCE_LENGTH 2
#define PTP_V2_SEQUENCE_OFFSET 58
@@ -167,14 +152,12 @@ enum ptp_packet_state {
/**
* struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
- * @words: UUID and (partial) sequence number
* @expiry: Time after which the packet should be delivered irrespective of
* event arrival.
* @state: The state of the packet - whether it is ready for processing or
* whether that is of no interest.
*/
struct efx_ptp_match {
- u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
unsigned long expiry;
enum ptp_packet_state state;
};
@@ -236,15 +219,9 @@ struct efx_ptp_rxfilter {
/**
* struct efx_ptp_data - Precision Time Protocol (PTP) state
* @efx: The NIC context
- * @channel: The PTP channel (Siena only)
- * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
- * separate events)
+ * @channel: The PTP channel (for Medford and Medford2)
* @rxq: Receive SKB queue (awaiting timestamps)
* @txq: Transmit SKB queue
- * @evt_list: List of MC receive events awaiting packets
- * @evt_free_list: List of free events
- * @evt_lock: Lock for manipulating evt_list and evt_free_list
- * @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
* @cleanup_work: Work task for periodic cleanup
@@ -310,13 +287,8 @@ struct efx_ptp_rxfilter {
struct efx_ptp_data {
struct efx_nic *efx;
struct efx_channel *channel;
- bool rx_ts_inline;
struct sk_buff_head rxq;
struct sk_buff_head txq;
- struct list_head evt_list;
- struct list_head evt_free_list;
- spinlock_t evt_lock;
- struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
struct delayed_work cleanup_work;
@@ -465,25 +437,6 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
return PTP_STAT_COUNT;
}
-/* For Siena platforms NIC time is s and ns */
-static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
-{
- struct timespec64 ts = ns_to_timespec64(ns);
- *nic_major = (u32)ts.tv_sec;
- *nic_minor = ts.tv_nsec;
-}
-
-static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
- s32 correction)
-{
- ktime_t kt = ktime_set(nic_major, nic_minor);
- if (correction >= 0)
- kt = ktime_add_ns(kt, (u64)correction);
- else
- kt = ktime_sub_ns(kt, (u64)-correction);
- return kt;
-}
-
/* To convert from s27 format to ns we multiply then divide by a power of 2.
* For the conversion from ns to s27, the operation is also converted to a
* multiply and shift.
@@ -697,12 +650,6 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
ptp->nic_time.minor_max = 1 << 27;
ptp->nic_time.sync_event_minor_shift = 19;
break;
- case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
- ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
- ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
- ptp->nic_time.minor_max = 1000000000;
- ptp->nic_time.sync_event_minor_shift = 22;
- break;
case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
@@ -1217,76 +1164,6 @@ fail:
return;
}
-static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
-{
- struct efx_ptp_data *ptp = efx->ptp_data;
- struct list_head *cursor;
- struct list_head *next;
-
- if (ptp->rx_ts_inline)
- return;
-
- /* Drop time-expired events */
- spin_lock_bh(&ptp->evt_lock);
- list_for_each_safe(cursor, next, &ptp->evt_list) {
- struct efx_ptp_event_rx *evt;
-
- evt = list_entry(cursor, struct efx_ptp_event_rx,
- link);
- if (time_after(jiffies, evt->expiry)) {
- list_move(&evt->link, &ptp->evt_free_list);
- netif_warn(efx, hw, efx->net_dev,
- "PTP rx event dropped\n");
- }
- }
- spin_unlock_bh(&ptp->evt_lock);
-}
-
-static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
- struct sk_buff *skb)
-{
- struct efx_ptp_data *ptp = efx->ptp_data;
- bool evts_waiting;
- struct list_head *cursor;
- struct list_head *next;
- struct efx_ptp_match *match;
- enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
-
- WARN_ON_ONCE(ptp->rx_ts_inline);
-
- spin_lock_bh(&ptp->evt_lock);
- evts_waiting = !list_empty(&ptp->evt_list);
- spin_unlock_bh(&ptp->evt_lock);
-
- if (!evts_waiting)
- return PTP_PACKET_STATE_UNMATCHED;
-
- match = (struct efx_ptp_match *)skb->cb;
- /* Look for a matching timestamp in the event queue */
- spin_lock_bh(&ptp->evt_lock);
- list_for_each_safe(cursor, next, &ptp->evt_list) {
- struct efx_ptp_event_rx *evt;
-
- evt = list_entry(cursor, struct efx_ptp_event_rx, link);
- if ((evt->seq0 == match->words[0]) &&
- (evt->seq1 == match->words[1])) {
- struct skb_shared_hwtstamps *timestamps;
-
- /* Match - add in hardware timestamp */
- timestamps = skb_hwtstamps(skb);
- timestamps->hwtstamp = evt->hwtimestamp;
-
- match->state = PTP_PACKET_STATE_MATCHED;
- rc = PTP_PACKET_STATE_MATCHED;
- list_move(&evt->link, &ptp->evt_free_list);
- break;
- }
- }
- spin_unlock_bh(&ptp->evt_lock);
-
- return rc;
-}
-
/* Process any queued receive events and corresponding packets
*
* q is returned with all the packets that are ready for delivery.
@@ -1302,9 +1179,6 @@ static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
match = (struct efx_ptp_match *)skb->cb;
if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
__skb_queue_tail(q, skb);
- } else if (efx_ptp_match_rx(efx, skb) ==
- PTP_PACKET_STATE_MATCHED) {
- __skb_queue_tail(q, skb);
} else if (time_after(jiffies, match->expiry)) {
match->state = PTP_PACKET_STATE_TIMED_OUT;
++ptp->rx_no_timestamp;
@@ -1583,8 +1457,6 @@ fail:
static int efx_ptp_stop(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- struct list_head *cursor;
- struct list_head *next;
int rc;
if (ptp == NULL)
@@ -1599,13 +1471,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
- /* Drop any pending receive events */
- spin_lock_bh(&efx->ptp_data->evt_lock);
- list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
- list_move(cursor, &efx->ptp_data->evt_free_list);
- }
- spin_unlock_bh(&efx->ptp_data->evt_lock);
-
return rc;
}
@@ -1645,8 +1510,6 @@ static void efx_ptp_worker(struct work_struct *work)
return;
}
- efx_ptp_drop_time_expired_events(efx);
-
__skb_queue_head_init(&tempq);
efx_ptp_process_events(efx, &tempq);
@@ -1695,7 +1558,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
{
struct efx_ptp_data *ptp;
int rc = 0;
- unsigned int pos;
if (efx->ptp_data) {
efx->ptp_data->channel = channel;
@@ -1709,7 +1571,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
ptp->efx = efx;
ptp->channel = channel;
- ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
@@ -1736,12 +1597,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
ptp->config.flags = 0;
ptp->config.tx_type = HWTSTAMP_TX_OFF;
ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
- INIT_LIST_HEAD(&ptp->evt_list);
- INIT_LIST_HEAD(&ptp->evt_free_list);
- spin_lock_init(&ptp->evt_lock);
- for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
- list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
-
INIT_LIST_HEAD(&ptp->rxfilters_mcast);
INIT_LIST_HEAD(&ptp->rxfilters_ucast);
@@ -1881,7 +1736,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
- u8 *match_data_012, *match_data_345;
unsigned int version;
u8 *data;
@@ -1897,12 +1751,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
if (version != PTP_VERSION_V1) {
return false;
}
-
- /* PTP V1 uses all six bytes of the UUID to match the packet
- * to the timestamp
- */
- match_data_012 = data + PTP_V1_UUID_OFFSET;
- match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
} else {
if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
return false;
@@ -1912,21 +1760,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
return false;
}
-
- /* The original V2 implementation uses bytes 2-7 of
- * the UUID to match the packet to the timestamp. This
- * discards two of the bytes of the MAC address used
- * to create the UUID (SF bug 33070). The PTP V2
- * enhanced mode fixes this issue and uses bytes 0-2
- * and byte 5-7 of the UUID.
- */
- match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
- if (ptp->mode == MC_CMD_PTP_MODE_V2) {
- match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
- } else {
- match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
- BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
- }
}
/* Does this packet require timestamping? */
@@ -1938,17 +1771,6 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
*/
BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
-
- /* Extract UUID/Sequence information */
- match->words[0] = (match_data_012[0] |
- (match_data_012[1] << 8) |
- (match_data_012[2] << 16) |
- (match_data_345[0] << 24));
- match->words[1] = (match_data_345[1] |
- (match_data_345[2] << 8) |
- (data[PTP_V1_SEQUENCE_OFFSET +
- PTP_V1_SEQUENCE_LENGTH - 1] <<
- 16));
} else {
match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
}
@@ -2112,50 +1934,6 @@ static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
queue_work(ptp->workwq, &ptp->work);
}
-/* Process a completed receive event. Put it on the event queue and
- * start worker thread. This is required because event and their
- * correspoding packets may come in either order.
- */
-static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
-{
- struct efx_ptp_event_rx *evt = NULL;
-
- if (WARN_ON_ONCE(ptp->rx_ts_inline))
- return;
-
- if (ptp->evt_frag_idx != 3) {
- ptp_event_failure(efx, 3);
- return;
- }
-
- spin_lock_bh(&ptp->evt_lock);
- if (!list_empty(&ptp->evt_free_list)) {
- evt = list_first_entry(&ptp->evt_free_list,
- struct efx_ptp_event_rx, link);
- list_del(&evt->link);
-
- evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
- evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
- MCDI_EVENT_SRC) |
- (EFX_QWORD_FIELD(ptp->evt_frags[1],
- MCDI_EVENT_SRC) << 8) |
- (EFX_QWORD_FIELD(ptp->evt_frags[0],
- MCDI_EVENT_SRC) << 16));
- evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
- EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
- EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
- ptp->ts_corrections.ptp_rx);
- evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
- list_add_tail(&evt->link, &ptp->evt_list);
-
- queue_work(ptp->workwq, &ptp->work);
- } else if (net_ratelimit()) {
- /* Log a rate-limited warning message. */
- netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
- }
- spin_unlock_bh(&ptp->evt_lock);
-}
-
static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
@@ -2202,9 +1980,6 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
if (!MCDI_EVENT_FIELD(*ev, CONT)) {
/* Process resulting event */
switch (code) {
- case MCDI_EVENT_CODE_PTP_RX:
- ptp_event_rx(efx, ptp);
- break;
case MCDI_EVENT_CODE_PTP_FAULT:
ptp_event_fault(efx, ptp);
break;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 563c1e317ce9..894fad0bb5ea 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -38,8 +38,7 @@
/*
* Loopback test packet structure
*
- * The self-test should stress every RSS vector, and unfortunately
- * Falcon only performs RSS on TCP/UDP packets.
+ * The self-test should stress every RSS vector.
*/
struct efx_loopback_payload {
char pad[2]; /* Ensures ip is 4-byte aligned */
@@ -584,10 +583,6 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
return 0;
}
-/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
- * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
- * to delay and retry. Therefore, it's safer to just poll directly. Wait
- * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index fe268b6c1cac..047322b04d4f 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -12,9 +12,11 @@
#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include <net/geneve.h>
+#include <net/tc_act/tc_ct.h>
#include "tc.h"
#include "tc_bindings.h"
#include "tc_encap_actions.h"
+#include "tc_conntrack.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"
@@ -29,6 +31,9 @@ enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev)
return EFX_ENCAP_TYPE_NONE;
}
+#define EFX_TC_HDR_TYPE_TTL_MASK ((u32)0xff)
+/* Hoplimit is stored in the most significant byte in the pedit ipv6 header action */
+#define EFX_TC_HDR_TYPE_HLIMIT_MASK ~((u32)0xff000000)
#define EFX_EFV_PF NULL
/* Look up the representor information (efv) for a device.
* May return NULL for the PF (us), or an error pointer for a device that
@@ -84,6 +89,12 @@ s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
return mport;
}
+static const struct rhashtable_params efx_tc_mac_ht_params = {
+ .key_len = offsetofend(struct efx_tc_mac_pedit_action, h_addr),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_mac_pedit_action, linkage),
+};
+
static const struct rhashtable_params efx_tc_encap_match_ht_params = {
.key_len = offsetof(struct efx_tc_encap_match, linkage),
.key_offset = 0,
@@ -96,6 +107,68 @@ static const struct rhashtable_params efx_tc_match_action_ht_params = {
.head_offset = offsetof(struct efx_tc_flow_rule, linkage),
};
+static const struct rhashtable_params efx_tc_lhs_rule_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct efx_tc_lhs_rule, cookie),
+ .head_offset = offsetof(struct efx_tc_lhs_rule, linkage),
+};
+
+static const struct rhashtable_params efx_tc_recirc_ht_params = {
+ .key_len = offsetof(struct efx_tc_recirc_id, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_recirc_id, linkage),
+};
+
+static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac(struct efx_nic *efx,
+ unsigned char h_addr[ETH_ALEN],
+ struct netlink_ext_ack *extack)
+{
+ struct efx_tc_mac_pedit_action *ped, *old;
+ int rc;
+
+ ped = kzalloc(sizeof(*ped), GFP_USER);
+ if (!ped)
+ return ERR_PTR(-ENOMEM);
+ memcpy(ped->h_addr, h_addr, ETH_ALEN);
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->mac_ht,
+ &ped->linkage,
+ efx_tc_mac_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(ped);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found, ref taken */
+ return old;
+ }
+
+ rc = efx_mae_allocate_pedit_mac(efx, ped);
+ if (rc < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to store pedit MAC address in hw");
+ goto out_remove;
+ }
+
+ /* ref and return */
+ refcount_set(&ped->ref, 1);
+ return ped;
+out_remove:
+ rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage,
+ efx_tc_mac_ht_params);
+ kfree(ped);
+ return ERR_PTR(rc);
+}
+
+static void efx_tc_flower_put_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ if (!refcount_dec_and_test(&ped->ref))
+ return; /* still in use */
+ rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage,
+ efx_tc_mac_ht_params);
+ efx_mae_free_pedit_mac(efx, ped);
+ kfree(ped);
+}
+
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
@@ -121,6 +194,10 @@ static void efx_tc_free_action_set(struct efx_nic *efx,
list_del(&act->encap_user);
efx_tc_flower_release_encap_md(efx, act->encap_md);
}
+ if (act->src_mac)
+ efx_tc_flower_put_mac(efx, act->src_mac);
+ if (act->dst_mac)
+ efx_tc_flower_put_mac(efx, act->dst_mac);
kfree(act);
}
@@ -201,23 +278,24 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
}
}
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_CVLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_TCP) |
- BIT(FLOW_DISSECTOR_KEY_IP))) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x",
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#llx",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -228,12 +306,13 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
!(match->value.eth_proto == htons(ETH_P_IP) ||
match->value.eth_proto == htons(ETH_P_IPV6)))
if (dissector->used_keys &
- (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_TCP))) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "L3/L4 flower keys %#x require protocol ipv[46]",
+ (BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "L3/L4 flower keys %#llx require protocol ipv[46]",
dissector->used_keys);
return -EINVAL;
}
@@ -281,9 +360,10 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
if ((match->value.ip_proto != IPPROTO_UDP &&
match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto))
if (dissector->used_keys &
- (BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_TCP))) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "L4 flower keys %#x require ipproto udp or tcp",
+ (BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "L4 flower keys %#llx require ipproto udp or tcp",
dissector->used_keys);
return -EINVAL;
}
@@ -344,15 +424,41 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, dst, enc_dport);
MAP_ENC_KEY_AND_MASK(KEYID, enc_keyid, enc_keyid, keyid, enc_keyid);
} else if (dissector->used_keys &
- (BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Flower enc keys require enc_control (keys: %#x)",
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Flower enc keys require enc_control (keys: %#llx)",
dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) {
+ struct flow_match_ct fm;
+
+ flow_rule_match_ct(rule, &fm);
+ match->value.ct_state_trk = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED);
+ match->mask.ct_state_trk = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED);
+ match->value.ct_state_est = !!(fm.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED);
+ match->mask.ct_state_est = !!(fm.mask->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED);
+ if (fm.mask->ct_state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+ TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported ct_state match %#x",
+ fm.mask->ct_state);
+ return -EOPNOTSUPP;
+ }
+ match->value.ct_mark = fm.key->ct_mark;
+ match->mask.ct_mark = fm.mask->ct_mark;
+ match->value.ct_zone = fm.key->ct_zone;
+ match->mask.ct_zone = fm.mask->ct_zone;
+
+ if (memchr_inv(fm.mask->ct_labels, 0, sizeof(fm.mask->ct_labels))) {
+ NL_SET_ERR_MSG_MOD(extack, "Matching on ct_label not supported");
+ return -EOPNOTSUPP;
+ }
+ }
return 0;
}
@@ -572,12 +678,65 @@ fail_pseudo:
return rc;
}
+static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx,
+ u32 chain_index,
+ struct net_device *net_dev)
+{
+ struct efx_tc_recirc_id *rid, *old;
+ int rc;
+
+ rid = kzalloc(sizeof(*rid), GFP_USER);
+ if (!rid)
+ return ERR_PTR(-ENOMEM);
+ rid->chain_index = chain_index;
+ /* We don't take a reference here, because it's implied - if there's
+ * a rule on the net_dev that's been offloaded to us, then the net_dev
+ * can't go away until the rule has been deoffloaded.
+ */
+ rid->net_dev = net_dev;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->recirc_ht,
+ &rid->linkage,
+ efx_tc_recirc_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(rid);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found */
+ rid = old;
+ } else {
+ rc = ida_alloc_range(&efx->tc->recirc_ida, 1, U8_MAX, GFP_USER);
+ if (rc < 0) {
+ rhashtable_remove_fast(&efx->tc->recirc_ht,
+ &rid->linkage,
+ efx_tc_recirc_ht_params);
+ kfree(rid);
+ return ERR_PTR(rc);
+ }
+ rid->fw_id = rc;
+ refcount_set(&rid->ref, 1);
+ }
+ return rid;
+}
+
+static void efx_tc_put_recirc_id(struct efx_nic *efx, struct efx_tc_recirc_id *rid)
+{
+ if (!refcount_dec_and_test(&rid->ref))
+ return; /* still in use */
+ rhashtable_remove_fast(&efx->tc->recirc_ht, &rid->linkage,
+ efx_tc_recirc_ht_params);
+ ida_free(&efx->tc->recirc_ida, rid->fw_id);
+ kfree(rid);
+}
+
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
efx_mae_delete_rule(efx, rule->fw_id);
/* Release entries in subsidiary tables */
efx_tc_free_action_set_list(efx, &rule->acts, true);
+ if (rule->match.rid)
+ efx_tc_put_recirc_id(efx, rule->match.rid);
if (rule->match.encap)
efx_tc_flower_release_encap_match(efx, rule->match.encap);
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
@@ -601,6 +760,8 @@ static const char *efx_tc_encap_type_name(enum efx_encap_type typ)
/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */
enum efx_tc_action_order {
EFX_TC_AO_DECAP,
+ EFX_TC_AO_DEC_TTL,
+ EFX_TC_AO_PEDIT_MAC_ADDRS,
EFX_TC_AO_VLAN_POP,
EFX_TC_AO_VLAN_PUSH,
EFX_TC_AO_COUNT,
@@ -615,6 +776,15 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
case EFX_TC_AO_DECAP:
if (act->decap)
return false;
+ /* PEDIT_MAC_ADDRS must not happen before DECAP, though it
+ * can wait until much later
+ */
+ if (act->dst_mac || act->src_mac)
+ return false;
+
+ /* Decrementing ttl must not happen before DECAP */
+ if (act->do_ttl_dec)
+ return false;
fallthrough;
case EFX_TC_AO_VLAN_POP:
if (act->vlan_pop >= 2)
@@ -634,12 +804,17 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
if (act->count)
return false;
fallthrough;
+ case EFX_TC_AO_PEDIT_MAC_ADDRS:
case EFX_TC_AO_ENCAP:
if (act->encap_md)
return false;
fallthrough;
case EFX_TC_AO_DELIVER:
return !act->deliver;
+ case EFX_TC_AO_DEC_TTL:
+ if (act->encap_md)
+ return false;
+ return !act->do_ttl_dec;
default:
/* Bad caller. Whatever they wanted to do, say they can't. */
WARN_ON_ONCE(1);
@@ -647,6 +822,532 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
}
}
+/**
+ * DOC: TC conntrack sequences
+ *
+ * The MAE hardware can handle at most two rounds of action rule matching;
+ * consequently we support conntrack through the notion of a "left-hand side
+ * rule". This is a rule which typically contains only the actions "ct" and
+ * "goto chain N", and corresponds to one or more "right-hand side rules" in
+ * chain N, which typically match on +trk+est, and may perform ct(nat) actions.
+ * RHS rules go in the Action Rule table as normal but with a nonzero recirc_id
+ * (the hardware equivalent of chain_index), while LHS rules may go in either
+ * the Action Rule or the Outer Rule table, the latter being preferred for
+ * performance reasons, and set both DO_CT and a recirc_id in their response.
+ *
+ * Besides the RHS rules, there are often also similar rules matching on
+ * +trk+new which perform the ct(commit) action. These are not offloaded.
+ */
+
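
A minimal standalone sketch of the two-pass model described above, using hypothetical names and none of the real MAE plumbing: the first pass plays the part of the LHS rule (conntrack lookup plus recirculation), the second the RHS rule keyed on the recirculation ID that stands in for chain_index.

#include <stdbool.h>
#include <stdio.h>

struct pkt_meta {
	unsigned int recirc_id;	/* 0 until an LHS rule sets it */
	bool ct_tracked;	/* +trk: the conntrack lookup ran */
	bool ct_established;	/* +est: lookup hit an established flow */
};

/* Pass 1: LHS rule - do the CT lookup, then "goto chain 1". */
static void lhs_rule(struct pkt_meta *m, bool conn_is_established)
{
	m->ct_tracked = true;
	m->ct_established = conn_is_established;
	m->recirc_id = 1;	/* hardware analogue of chain_index 1 */
}

/* Pass 2: RHS rule - matches +trk+est packets recirculated to chain 1. */
static bool rhs_rule_matches(const struct pkt_meta *m)
{
	return m->recirc_id == 1 && m->ct_tracked && m->ct_established;
}

int main(void)
{
	struct pkt_meta m = { 0 };

	lhs_rule(&m, true);
	printf("RHS rule %s\n", rhs_rule_matches(&m) ? "matches" : "does not match");
	return 0;
}
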
+static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr,
+ struct efx_tc_match *match)
+{
+ const struct flow_action_entry *fa;
+ int i;
+
+ flow_action_for_each(i, fa, &fr->action) {
+ switch (fa->id) {
+ case FLOW_ACTION_GOTO:
+ return true;
+ case FLOW_ACTION_CT:
+ /* If rule is -trk, or doesn't mention trk at all, then
+ * a CT action implies a conntrack lookup (hence it's an
+ * LHS rule). If rule is +trk, then a CT action could
+ * just be ct(nat) or even ct(commit) (though the latter
+ * can't be offloaded).
+ */
+ if (!match->mask.ct_state_trk || !match->value.ct_state_trk)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
+ struct flow_cls_offload *tc,
+ struct flow_rule *fr,
+ struct net_device *net_dev,
+ struct efx_tc_lhs_rule *rule)
+
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_action *act = &rule->lhs_act;
+ const struct flow_action_entry *fa;
+ bool pipe = true;
+ int i;
+
+ flow_action_for_each(i, fa, &fr->action) {
+ struct efx_tc_ct_zone *ct_zone;
+ struct efx_tc_recirc_id *rid;
+
+ if (!pipe) {
+ /* more actions after a non-pipe action */
+ NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action");
+ return -EINVAL;
+ }
+ switch (fa->id) {
+ case FLOW_ACTION_GOTO:
+ if (!fa->chain_index) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't goto chain 0, no looping in hw");
+ return -EOPNOTSUPP;
+ }
+ rid = efx_tc_get_recirc_id(efx, fa->chain_index,
+ net_dev);
+ if (IS_ERR(rid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate a hardware recirculation ID for this chain_index");
+ return PTR_ERR(rid);
+ }
+ act->rid = rid;
+ if (fa->hw_stats) {
+ struct efx_tc_counter_index *cnt;
+
+ if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "hw_stats_type %u not supported (only 'delayed')",
+ fa->hw_stats);
+ return -EOPNOTSUPP;
+ }
+ cnt = efx_tc_flower_get_counter_index(efx, tc->cookie,
+ EFX_TC_COUNTER_TYPE_OR);
+ if (IS_ERR(cnt)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
+ return PTR_ERR(cnt);
+ }
+ WARN_ON(act->count); /* can't happen */
+ act->count = cnt;
+ }
+ pipe = false;
+ break;
+ case FLOW_ACTION_CT:
+ if (act->zone) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload multiple ct actions");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action & (TCA_CT_ACT_COMMIT |
+ TCA_CT_ACT_FORCE)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload ct commit/force");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action & TCA_CT_ACT_CLEAR) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't clear ct in LHS rule");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action & (TCA_CT_ACT_NAT |
+ TCA_CT_ACT_NAT_SRC |
+ TCA_CT_ACT_NAT_DST)) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't perform NAT in LHS rule - packet isn't conntracked yet");
+ return -EOPNOTSUPP;
+ }
+ if (fa->ct.action) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ fa->ct.action);
+ return -EOPNOTSUPP;
+ }
+ ct_zone = efx_tc_ct_register_zone(efx, fa->ct.zone,
+ fa->ct.flow_table);
+ if (IS_ERR(ct_zone)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to register for CT updates");
+ return PTR_ERR(ct_zone);
+ }
+ act->zone = ct_zone;
+ break;
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ fa->id);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (pipe) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing goto chain in LHS rule");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void efx_tc_flower_release_lhs_actions(struct efx_nic *efx,
+ struct efx_tc_lhs_action *act)
+{
+ if (act->rid)
+ efx_tc_put_recirc_id(efx, act->rid);
+ if (act->zone)
+ efx_tc_ct_unregister_zone(efx, act->zone);
+ if (act->count)
+ efx_tc_flower_put_counter_index(efx, act->count);
+}
+
+/**
+ * struct efx_tc_mangler_state - accumulates 32-bit pedits into fields
+ *
+ * @dst_mac_32: dst_mac[0:3] has been populated
+ * @dst_mac_16: dst_mac[4:5] has been populated
+ * @src_mac_16: src_mac[0:1] has been populated
+ * @src_mac_32: src_mac[2:5] has been populated
+ * @dst_mac: h_dest field of ethhdr
+ * @src_mac: h_source field of ethhdr
+ *
+ * Since FLOW_ACTION_MANGLE comes in 32-bit chunks that do not
+ * necessarily equate to whole fields of the packet header, this
+ * structure is used to hold the cumulative effect of the partial
+ * field pedits that have been processed so far.
+ */
+struct efx_tc_mangler_state {
+ u8 dst_mac_32:1; /* eth->h_dest[0:3] */
+ u8 dst_mac_16:1; /* eth->h_dest[4:5] */
+ u8 src_mac_16:1; /* eth->h_source[0:1] */
+ u8 src_mac_32:1; /* eth->h_source[2:5] */
+ unsigned char dst_mac[ETH_ALEN];
+ unsigned char src_mac[ETH_ALEN];
+};
+
+/**
+ * efx_tc_complete_mac_mangle() - pull complete field pedits out of @mung
+ * @efx: NIC we're installing a flow rule on
+ * @act: action set (cursor) to update
+ * @mung: accumulated partial mangles
+ * @extack: netlink extended ack for reporting errors
+ *
+ * Check @mung to find any combinations of partial mangles that can be
+ * combined into a complete packet field edit, add that edit to @act,
+ * and consume the partial mangles from @mung.
+ */
+
+static int efx_tc_complete_mac_mangle(struct efx_nic *efx,
+ struct efx_tc_action_set *act,
+ struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_tc_mac_pedit_action *ped;
+
+ if (mung->dst_mac_32 && mung->dst_mac_16) {
+ ped = efx_tc_flower_get_mac(efx, mung->dst_mac, extack);
+ if (IS_ERR(ped))
+ return PTR_ERR(ped);
+
+ /* Check that we have not already populated dst_mac */
+ if (act->dst_mac)
+ efx_tc_flower_put_mac(efx, act->dst_mac);
+
+ act->dst_mac = ped;
+
+ /* consume the incomplete state */
+ mung->dst_mac_32 = 0;
+ mung->dst_mac_16 = 0;
+ }
+ if (mung->src_mac_16 && mung->src_mac_32) {
+ ped = efx_tc_flower_get_mac(efx, mung->src_mac, extack);
+ if (IS_ERR(ped))
+ return PTR_ERR(ped);
+
+ /* Check that we have not already populated src_mac */
+ if (act->src_mac)
+ efx_tc_flower_put_mac(efx, act->src_mac);
+
+ act->src_mac = ped;
+
+ /* consume the incomplete state */
+ mung->src_mac_32 = 0;
+ mung->src_mac_16 = 0;
+ }
+ return 0;
+}
+
+static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
+ const struct flow_action_entry *fa,
+ struct netlink_ext_ack *extack)
+{
+ switch (fa->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (fa->mangle.offset) {
+ case offsetof(struct iphdr, ttl):
+ /* check that pedit applies to ttl only */
+ if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK)
+ break;
+
+ /* Adding 0xff is equivalent to decrementing the ttl.
+ * Other added values are not supported.
+ */
+ if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) != U8_MAX)
+ break;
+
+ /* check that we do not decrement ttl twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+ act->do_ttl_dec = 1;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (fa->mangle.offset) {
+ case round_down(offsetof(struct ipv6hdr, hop_limit), 4):
+ /* check that pedit applies to hoplimit only */
+ if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK)
+ break;
+
+ /* Adding 0xff is equivalent to decrementing the hoplimit.
+ * Other added values are not supported.
+ */
+ if ((fa->mangle.val >> 24) != U8_MAX)
+ break;
+
+ /* check that we do not decrement hoplimit twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+ act->do_ttl_dec = 1;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: ttl add action type %x %x %x/%x",
+ fa->mangle.htype, fa->mangle.offset,
+ fa->mangle.val, fa->mangle.mask);
+ return -EOPNOTSUPP;
+}
+
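
The "adding 0xff" convention accepted above rests on 8-bit wraparound: for any ttl in 1..255, (ttl + 0xff) mod 256 is exactly ttl - 1, which is why the hardware's decrement-ttl action can stand in for the pedit. A standalone check of that identity (plain C, nothing driver-specific):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Adding 0xff to an 8-bit TTL/hop_limit value wraps modulo 256,
	 * i.e. decrements it, for any starting value >= 1.
	 */
	for (unsigned int ttl = 1; ttl <= 255; ttl++) {
		uint8_t added = (uint8_t)(ttl + 0xff);

		assert(added == ttl - 1);
	}
	printf("add 0xff == dec ttl for every ttl in [1, 255]\n");
	return 0;
}
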
+/**
+ * efx_tc_mangle() - handle a single 32-bit (or less) pedit
+ * @efx: NIC we're installing a flow rule on
+ * @act: action set (cursor) to update
+ * @fa: FLOW_ACTION_MANGLE action metadata
+ * @mung: accumulator for partial mangles
+ * @extack: netlink extended ack for reporting errors
+ * @match: original match used along with the mangle action
+ *
+ * Identify the fields written by a FLOW_ACTION_MANGLE, and record
+ * the partial mangle state in @mung. If this mangle completes an
+ * earlier partial mangle, consume and apply to @act by calling
+ * efx_tc_complete_mac_mangle().
+ */
+
+static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
+ const struct flow_action_entry *fa,
+ struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack,
+ struct efx_tc_match *match)
+{
+ __le32 mac32;
+ __le16 mac16;
+ u8 tr_ttl;
+
+ switch (fa->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ BUILD_BUG_ON(offsetof(struct ethhdr, h_dest) != 0);
+ BUILD_BUG_ON(offsetof(struct ethhdr, h_source) != 6);
+ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_PEDIT_MAC_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Pedit mangle mac action violates action order");
+ return -EOPNOTSUPP;
+ }
+ switch (fa->mangle.offset) {
+ case 0:
+ if (fa->mangle.mask) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth.dst32 mangle",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ /* Ethernet address is little-endian */
+ mac32 = cpu_to_le32(fa->mangle.val);
+ memcpy(mung->dst_mac, &mac32, sizeof(mac32));
+ mung->dst_mac_32 = 1;
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ case 4:
+ if (fa->mangle.mask == 0xffff) {
+ mac16 = cpu_to_le16(fa->mangle.val >> 16);
+ memcpy(mung->src_mac, &mac16, sizeof(mac16));
+ mung->src_mac_16 = 1;
+ } else if (fa->mangle.mask == 0xffff0000) {
+ mac16 = cpu_to_le16((u16)fa->mangle.val);
+ memcpy(mung->dst_mac + 4, &mac16, sizeof(mac16));
+ mung->dst_mac_16 = 1;
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ case 8:
+ if (fa->mangle.mask) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth.src32 mangle",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ mac32 = cpu_to_le32(fa->mangle.val);
+ memcpy(mung->src_mac + 2, &mac32, sizeof(mac32));
+ mung->src_mac_32 = 1;
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
+ fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (fa->mangle.offset) {
+ case offsetof(struct iphdr, ttl):
+ /* we currently only support pedit IP4 when it applies
+ * to TTL and then only when it can be achieved with a
+ * decrement ttl action
+ */
+
+ /* check that pedit applies to ttl only */
+ if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+
+ /* we can only convert to a dec ttl when we have an
+ * exact match on the ttl field
+ */
+ if (match->mask.ip_ttl != U8_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
+ match->mask.ip_ttl);
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we don't try to decrement 0, which equates
+ * to setting the ttl to 0xff
+ */
+ if (match->value.ip_ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: we cannot decrement ttl past 0");
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we do not decrement ttl twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+
+ /* check pedit can be achieved with decrement action */
+ tr_ttl = match->value.ip_ttl - 1;
+ if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) == tr_ttl) {
+ act->do_ttl_dec = 1;
+ return 0;
+ }
+
+ fallthrough;
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle on the ttl field (offset is %u)",
+ fa->mangle.offset);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (fa->mangle.offset) {
+ case round_down(offsetof(struct ipv6hdr, hop_limit), 4):
+ /* we currently only support pedit IP6 when it applies
+ * to the hoplimit and then only when it can be achieved
+ * with a decrement hoplimit action
+ */
+
+ /* check that pedit applies to hop_limit only */
+ if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
+ fa->mangle.mask);
+
+ return -EOPNOTSUPP;
+ }
+
+ /* we can only convert to a dec ttl when we have an
+ * exact match on the ttl field
+ */
+ if (match->mask.ip_ttl != U8_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
+ match->mask.ip_ttl);
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we don't try to decrement 0, which equates
+ * to setting the ttl to 0xff
+ */
+ if (match->value.ip_ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: we cannot decrement hop_limit past 0");
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we do not decrement hoplimit twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+
+ /* check pedit can be achieved with decrement action */
+ tr_ttl = match->value.ip_ttl - 1;
+ if ((fa->mangle.val >> 24) == tr_ttl) {
+ act->do_ttl_dec = 1;
+ return 0;
+ }
+
+ fallthrough;
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle on the hop_limit field");
+ return -EOPNOTSUPP;
+ }
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled mangle htype %u for action rule",
+ fa->mangle.htype);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
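The two ttl/hop_limit branches above both reduce a pedit to the MAE's single decrement-TTL action. A minimal standalone sketch of that equivalence check, assuming the TTL occupies the low byte of the 32-bit pedit word as the EFX_TC_HDR_TYPE_TTL_MASK handling above implies (illustrative only, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* A pedit that writes (matched_ttl - 1) into an exactly-matched TTL is
 * the same thing as a hardware "decrement TTL" action.
 */
static bool pedit_is_dec_ttl(uint8_t matched_ttl, uint8_t match_mask,
			     uint32_t pedit_val, uint32_t pedit_mask)
{
	if (pedit_mask != ~(uint32_t)0xff)	/* must leave all but TTL alone */
		return false;
	if (match_mask != 0xff)			/* need an exact match on TTL */
		return false;
	if (matched_ttl == 0)			/* 0 - 1 would wrap to 0xff */
		return false;
	return (uint8_t)(pedit_val & 0xff) == (uint8_t)(matched_ttl - 1);
}

The IPv6 branch is the same idea, but hop_limit sits in the top byte of its 32-bit word, hence the val >> 24 comparison above.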
+/**
+ * efx_tc_incomplete_mangle() - check for leftover partial pedits
+ * @mung: accumulator for partial mangles
+ * @extack: netlink extended ack for reporting errors
+ *
+ * Since the MAE can only overwrite whole fields, any partial
+ * field mangle left over on reaching packet delivery (mirred or
+ * end of TC actions) cannot be offloaded. Check for any such
+ * and reject them with -%EOPNOTSUPP.
+ */
+static int efx_tc_incomplete_mangle(struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack)
+{
+ if (mung->dst_mac_32 || mung->dst_mac_16) {
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of destination MAC address");
+ return -EOPNOTSUPP;
+ }
+ if (mung->src_mac_16 || mung->src_mac_32) {
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of source MAC address");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
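Because TC pedit hands the driver eth.dst/eth.src in 32-bit-plus-16-bit pieces while the MAE can only rewrite a whole MAC address, the mangler state accumulates the halves, and only a complete address becomes a hardware action; efx_tc_incomplete_mangle() then rejects anything left half-done. A stripped-down sketch of that accumulator (standalone illustration; field names follow struct efx_tc_mangler_state as used in this patch):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct mac_mangle_acc {
	uint8_t dst_mac[6];
	unsigned int dst_mac_32:1;	/* bytes 0-3 have been supplied */
	unsigned int dst_mac_16:1;	/* bytes 4-5 have been supplied */
};

static void mac_acc_add32(struct mac_mangle_acc *acc, const uint8_t *four)
{
	memcpy(acc->dst_mac, four, 4);
	acc->dst_mac_32 = 1;
}

static void mac_acc_add16(struct mac_mangle_acc *acc, const uint8_t *two)
{
	memcpy(acc->dst_mac + 4, two, 2);
	acc->dst_mac_16 = 1;
}

/* Only once both halves are present can a whole-field MAC rewrite be
 * handed to the hardware; a rule that reaches delivery with exactly one
 * half set is the "incomplete pedit" case rejected above.
 */
static bool mac_acc_complete(const struct mac_mangle_acc *acc)
{
	return acc->dst_mac_32 && acc->dst_mac_16;
}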
+
static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc)
@@ -681,11 +1382,40 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
match.mask.ingress_port = ~0;
if (tc->common.chain_index) {
- NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
- return -EOPNOTSUPP;
+ struct efx_tc_recirc_id *rid;
+
+ rid = efx_tc_get_recirc_id(efx, tc->common.chain_index, net_dev);
+ if (IS_ERR(rid)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to allocate a hardware recirculation ID for chain_index %u",
+ tc->common.chain_index);
+ return PTR_ERR(rid);
+ }
+ match.rid = rid;
+ match.value.recirc_id = rid->fw_id;
}
match.mask.recirc_id = 0xff;
+ /* AR table can't match on DO_CT (+trk). But a commonly used pattern is
+ * +trk+est, which is strictly implied by +est, so rewrite it to that.
+ */
+ if (match.mask.ct_state_trk && match.value.ct_state_trk &&
+ match.mask.ct_state_est && match.value.ct_state_est)
+ match.mask.ct_state_trk = 0;
+ /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could
+ * match +trk-est (CT_HIT=0) despite being on an established connection.
+ * So make -est imply -tcp_syn_fin_rst match to ensure these packets
+ * still hit the software path.
+ */
+ if (match.mask.ct_state_est && !match.value.ct_state_est) {
+ if (match.value.tcp_syn_fin_rst) {
+ /* Can't offload this combination */
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ match.mask.tcp_syn_fin_rst = true;
+ }
+
flow_action_for_each(i, fa, &fr->action) {
switch (fa->id) {
case FLOW_ACTION_REDIRECT:
@@ -702,12 +1432,13 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (!found) { /* We don't care. */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter that doesn't egdev us\n");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto release;
}
rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
if (rc)
- return rc;
+ goto release;
if (efx_tc_match_is_encap(&match.mask)) {
enum efx_encap_type type;
@@ -716,7 +1447,8 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (type == EFX_ENCAP_TYPE_NONE) {
NL_SET_ERR_MSG_MOD(extack,
"Egress encap match on unsupported tunnel device");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto release;
}
rc = efx_mae_check_encap_type_supported(efx, type);
@@ -724,25 +1456,26 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
NL_SET_ERR_MSG_FMT_MOD(extack,
"Firmware reports no support for %s encap match",
efx_tc_encap_type_name(type));
- return rc;
+ goto release;
}
rc = efx_tc_flower_record_encap_match(efx, &match, type,
EFX_TC_EM_DIRECT, 0, 0,
extack);
if (rc)
- return rc;
+ goto release;
} else {
/* This is not a tunnel decap rule, ignore it */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter without encap match\n");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto release;
}
rule = kzalloc(sizeof(*rule), GFP_USER);
if (!rule) {
rc = -ENOMEM;
- goto out_free;
+ goto release;
}
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
@@ -754,7 +1487,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
rc = -EEXIST;
- goto out_free;
+ goto release;
}
act = kzalloc(sizeof(*act), GFP_USER);
@@ -912,21 +1645,95 @@ release:
/* We failed to insert the rule, so free up any entries we created in
* subsidiary tables.
*/
+ if (match.rid)
+ efx_tc_put_recirc_id(efx, match.rid);
if (act)
efx_tc_free_action_set(efx, act, false);
if (rule) {
- rhashtable_remove_fast(&efx->tc->match_action_ht,
- &rule->linkage,
- efx_tc_match_action_ht_params);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
-out_free:
kfree(rule);
if (match.encap)
efx_tc_flower_release_encap_match(efx, match.encap);
return rc;
}
+static int efx_tc_flower_replace_lhs(struct efx_nic *efx,
+ struct flow_cls_offload *tc,
+ struct flow_rule *fr,
+ struct efx_tc_match *match,
+ struct efx_rep *efv,
+ struct net_device *net_dev)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_rule *rule, *old;
+ int rc;
+
+ if (tc->common.chain_index) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0");
+ return -EOPNOTSUPP;
+ }
+
+ if (match->mask.ct_state_trk && match->value.ct_state_trk) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk");
+ return -EOPNOTSUPP;
+ }
+ /* LHS rules are always -trk, so we don't need to match on that */
+ match->mask.ct_state_trk = 0;
+ match->value.ct_state_trk = 0;
+
+ rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack);
+ if (rc)
+ return rc;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule)
+ return -ENOMEM;
+ rule->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
+ &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ /* See note in efx_tc_flower_replace() regarding passed net_dev
+ * (used for efx_tc_get_recirc_id()).
+ */
+ rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, efx->net_dev, rule);
+ if (rc)
+ goto release;
+
+ rule->match = *match;
+
+ rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
+ goto release;
+ }
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed lhs rule (cookie %lx)\n",
+ tc->cookie);
+ return 0;
+
+release:
+ efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ kfree(rule);
+ return rc;
+}
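Note the rollback discipline shared by this function and efx_tc_flower_replace(): the rule is inserted into its hashtable before any hardware call, and on failure it is unlinked only when it was not a pre-existing entry (the !old check). A condensed sketch of that pattern with a hypothetical entry type:

/* Hypothetical condensed form of the insert-then-rollback pattern;
 * <linux/rhashtable.h> and <linux/slab.h> assumed.
 */
struct example_rule {
	unsigned long cookie;		/* hash key */
	struct rhash_head linkage;
};

static int example_program_hw(struct example_rule *rule)
{
	return 0;			/* stands in for efx_mae_insert_lhs_rule() */
}

static int example_replace(struct rhashtable *ht,
			   const struct rhashtable_params *params,
			   unsigned long cookie)
{
	struct example_rule *rule, *old;
	int rc;

	rule = kzalloc(sizeof(*rule), GFP_USER);
	if (!rule)
		return -ENOMEM;
	rule->cookie = cookie;
	old = rhashtable_lookup_get_insert_fast(ht, &rule->linkage, *params);
	if (old) {			/* this cookie is already offloaded */
		rc = -EEXIST;
		goto release;
	}
	rc = example_program_hw(rule);
	if (rc)
		goto release;
	return 0;
release:
	if (!old)			/* only unlink the entry we inserted */
		rhashtable_remove_fast(ht, &rule->linkage, *params);
	kfree(rule);
	return rc;
}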
+
static int efx_tc_flower_replace(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc,
@@ -936,6 +1743,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
struct netlink_ext_ack *extack = tc->common.extack;
const struct ip_tunnel_info *encap_info = NULL;
struct efx_tc_flow_rule *rule = NULL, *old;
+ struct efx_tc_mangler_state mung = {};
struct efx_tc_action_set *act = NULL;
const struct flow_action_entry *fa;
struct efx_rep *from_efv, *to_efv;
@@ -982,19 +1790,69 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
return -EOPNOTSUPP;
}
+ if (efx_tc_rule_is_lhs_rule(fr, &match))
+ return efx_tc_flower_replace_lhs(efx, tc, fr, &match, efv,
+ net_dev);
+
+ /* chain_index 0 is always recirc_id 0 (and does not appear in recirc_ht).
+ * Conveniently, match.rid == NULL and match.value.recirc_id == 0 owing
+ * to the initial memset(), so we don't need to do anything in that case.
+ */
if (tc->common.chain_index) {
- NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
- return -EOPNOTSUPP;
+ struct efx_tc_recirc_id *rid;
+
+ /* Note regarding passed net_dev:
+ * VFreps and PF can share chain namespace, as they have
+ * distinct ingress_mports. So we don't need to burn an
+ * extra recirc_id if both use the same chain_index.
+ * (Strictly speaking, we could give each VFrep its own
+ * recirc_id namespace that doesn't take IDs away from the
+ * PF, but that would require a bunch of additional IDAs -
+ * one for each representor - and that's not likely to be
+ * the main cause of recirc_id exhaustion anyway.)
+ */
+ rid = efx_tc_get_recirc_id(efx, tc->common.chain_index,
+ efx->net_dev);
+ if (IS_ERR(rid)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to allocate a hardware recirculation ID for chain_index %u",
+ tc->common.chain_index);
+ return PTR_ERR(rid);
+ }
+ match.rid = rid;
+ match.value.recirc_id = rid->fw_id;
}
match.mask.recirc_id = 0xff;
+ /* AR table can't match on DO_CT (+trk). But a commonly used pattern is
+ * +trk+est, which is strictly implied by +est, so rewrite it to that.
+ */
+ if (match.mask.ct_state_trk && match.value.ct_state_trk &&
+ match.mask.ct_state_est && match.value.ct_state_est)
+ match.mask.ct_state_trk = 0;
+ /* Thanks to CT_TCP_FLAGS_INHIBIT, packets with interesting flags could
+ * match +trk-est (CT_HIT=0) despite being on an established connection.
+ * So make -est imply -tcp_syn_fin_rst match to ensure these packets
+ * still hit the software path.
+ */
+ if (match.mask.ct_state_est && !match.value.ct_state_est) {
+ if (match.value.tcp_syn_fin_rst) {
+ /* Can't offload this combination */
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ match.mask.tcp_syn_fin_rst = true;
+ }
+
rc = efx_mae_match_check_caps(efx, &match.mask, extack);
if (rc)
- return rc;
+ goto release;
rule = kzalloc(sizeof(*rule), GFP_USER);
- if (!rule)
- return -ENOMEM;
+ if (!rule) {
+ rc = -ENOMEM;
+ goto release;
+ }
INIT_LIST_HEAD(&rule->acts.list);
rule->cookie = tc->cookie;
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
@@ -1004,8 +1862,8 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
- kfree(rule);
- return -EEXIST;
+ rc = -EEXIST;
+ goto release;
}
/* Parse actions */
@@ -1222,6 +2080,16 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
act->vlan_proto[act->vlan_push] = fa->vlan.proto;
act->vlan_push++;
break;
+ case FLOW_ACTION_ADD:
+ rc = efx_tc_pedit_add(efx, act, fa, extack);
+ if (rc < 0)
+ goto release;
+ break;
+ case FLOW_ACTION_MANGLE:
+ rc = efx_tc_mangle(efx, act, fa, &mung, extack, &match);
+ if (rc < 0)
+ goto release;
+ break;
case FLOW_ACTION_TUNNEL_ENCAP:
if (encap_info) {
/* Can't specify encap multiple times.
@@ -1261,6 +2129,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
}
}
+ rc = efx_tc_incomplete_mangle(&mung, extack);
+ if (rc < 0)
+ goto release;
if (act) {
/* Not shot/redirected, so deliver to default dest */
if (from_efv == EFX_EFV_PF)
@@ -1323,12 +2194,15 @@ release:
/* We failed to insert the rule, so free up any entries we created in
* subsidiary tables.
*/
+ if (match.rid)
+ efx_tc_put_recirc_id(efx, match.rid);
if (act)
efx_tc_free_action_set(efx, act, false);
if (rule) {
- rhashtable_remove_fast(&efx->tc->match_action_ht,
- &rule->linkage,
- efx_tc_match_action_ht_params);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
efx_tc_free_action_set_list(efx, &rule->acts, false);
}
kfree(rule);
@@ -1340,8 +2214,26 @@ static int efx_tc_flower_destroy(struct efx_nic *efx,
struct flow_cls_offload *tc)
{
struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_rule *lhs_rule;
struct efx_tc_flow_rule *rule;
+ lhs_rule = rhashtable_lookup_fast(&efx->tc->lhs_rule_ht, &tc->cookie,
+ efx_tc_lhs_rule_ht_params);
+ if (lhs_rule) {
+ /* Remove it from HW */
+ efx_mae_remove_lhs_rule(efx, lhs_rule);
+ /* Delete it from SW */
+ efx_tc_flower_release_lhs_actions(efx, &lhs_rule->lhs_act);
+ rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &lhs_rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ if (lhs_rule->match.encap)
+ efx_tc_flower_release_encap_match(efx, lhs_rule->match.encap);
+ netif_dbg(efx, drv, efx->net_dev, "Removed (lhs) filter %lx\n",
+ lhs_rule->cookie);
+ kfree(lhs_rule);
+ return 0;
+ }
+
rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
efx_tc_match_action_ht_params);
if (!rule) {
@@ -1657,11 +2549,17 @@ int efx_init_tc(struct efx_nic *efx)
rc = efx_tc_configure_fallback_acts_reps(efx);
if (rc)
return rc;
- rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ rc = efx_mae_get_tables(efx);
if (rc)
return rc;
+ rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ if (rc)
+ goto out_free;
efx->tc->up = true;
return 0;
+out_free:
+ efx_mae_free_tables(efx);
+ return rc;
}
void efx_fini_tc(struct efx_nic *efx)
@@ -1677,6 +2575,7 @@ void efx_fini_tc(struct efx_nic *efx)
efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf);
efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps);
efx->tc->up = false;
+ efx_mae_free_tables(efx);
}
/* At teardown time, all TC filter rules (and thus all resources they created)
@@ -1691,6 +2590,42 @@ static void efx_tc_encap_match_free(void *ptr, void *__unused)
kfree(encap);
}
+static void efx_tc_recirc_free(void *ptr, void *arg)
+{
+ struct efx_tc_recirc_id *rid = ptr;
+ struct efx_nic *efx = arg;
+
+ WARN_ON(refcount_read(&rid->ref));
+ ida_free(&efx->tc->recirc_ida, rid->fw_id);
+ kfree(rid);
+}
+
+static void efx_tc_lhs_free(void *ptr, void *arg)
+{
+ struct efx_tc_lhs_rule *rule = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc lhs_rule %lx still present at teardown, removing\n",
+ rule->cookie);
+
+ if (rule->lhs_act.zone)
+ efx_tc_ct_unregister_zone(efx, rule->lhs_act.zone);
+ if (rule->lhs_act.count)
+ efx_tc_flower_put_counter_index(efx, rule->lhs_act.count);
+ efx_mae_remove_lhs_rule(efx, rule);
+
+ kfree(rule);
+}
+
+static void efx_tc_mac_free(void *ptr, void *__unused)
+{
+ struct efx_tc_mac_pedit_action *ped = ptr;
+
+ WARN_ON(refcount_read(&ped->ref));
+ kfree(ped);
+}
+
static void efx_tc_flow_free(void *ptr, void *arg)
{
struct efx_tc_flow_rule *rule = ptr;
@@ -1731,12 +2666,25 @@ int efx_init_struct_tc(struct efx_nic *efx)
rc = efx_tc_init_counters(efx);
if (rc < 0)
goto fail_counters;
+ rc = rhashtable_init(&efx->tc->mac_ht, &efx_tc_mac_ht_params);
+ if (rc < 0)
+ goto fail_mac_ht;
rc = rhashtable_init(&efx->tc->encap_match_ht, &efx_tc_encap_match_ht_params);
if (rc < 0)
goto fail_encap_match_ht;
rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
if (rc < 0)
goto fail_match_action_ht;
+ rc = rhashtable_init(&efx->tc->lhs_rule_ht, &efx_tc_lhs_rule_ht_params);
+ if (rc < 0)
+ goto fail_lhs_rule_ht;
+ rc = efx_tc_init_conntrack(efx);
+ if (rc < 0)
+ goto fail_conntrack;
+ rc = rhashtable_init(&efx->tc->recirc_ht, &efx_tc_recirc_ht_params);
+ if (rc < 0)
+ goto fail_recirc_ht;
+ ida_init(&efx->tc->recirc_ida);
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
@@ -1749,9 +2697,17 @@ int efx_init_struct_tc(struct efx_nic *efx)
efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
return 0;
+fail_recirc_ht:
+ efx_tc_destroy_conntrack(efx);
+fail_conntrack:
+ rhashtable_destroy(&efx->tc->lhs_rule_ht);
+fail_lhs_rule_ht:
+ rhashtable_destroy(&efx->tc->match_action_ht);
fail_match_action_ht:
rhashtable_destroy(&efx->tc->encap_match_ht);
fail_encap_match_ht:
+ rhashtable_destroy(&efx->tc->mac_ht);
+fail_mac_ht:
efx_tc_destroy_counters(efx);
fail_counters:
efx_tc_destroy_encap_actions(efx);
@@ -1778,10 +2734,16 @@ void efx_fini_struct_tc(struct efx_nic *efx)
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id !=
MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ rhashtable_free_and_destroy(&efx->tc->lhs_rule_ht, efx_tc_lhs_free, efx);
rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
efx);
rhashtable_free_and_destroy(&efx->tc->encap_match_ht,
efx_tc_encap_match_free, NULL);
+ efx_tc_fini_conntrack(efx);
+ rhashtable_free_and_destroy(&efx->tc->recirc_ht, efx_tc_recirc_free, efx);
+ WARN_ON(!ida_is_empty(&efx->tc->recirc_ida));
+ ida_destroy(&efx->tc->recirc_ida);
+ rhashtable_free_and_destroy(&efx->tc->mac_ht, efx_tc_mac_free, NULL);
efx_tc_fini_counters(efx);
efx_tc_fini_encap_actions(efx);
mutex_unlock(&efx->tc->mutex);
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 1549c3df43bb..4dd2c378fd9f 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -18,36 +18,76 @@
#define IS_ALL_ONES(v) (!(typeof (v))~(v))
-#ifdef CONFIG_IPV6
+/**
+ * struct efx_tc_mac_pedit_action - mac pedit action fields
+ *
+ * @h_addr: mac address field of ethernet header
+ * @linkage: rhashtable reference
+ * @ref: reference count
+ * @fw_id: index of this entry in firmware MAC address table
+ *
+ * MAC address edits are indirected through a table in the hardware
+ */
+struct efx_tc_mac_pedit_action {
+ u8 h_addr[ETH_ALEN];
+ struct rhash_head linkage;
+ refcount_t ref;
+ u32 fw_id; /* index of this entry in firmware MAC address table */
+};
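Since these entries are shared (several rules that write the same address reference one firmware slot), @ref governs their lifetime. A rough outline of the expected get/put behaviour (helper names here are hypothetical; the real lookup and creation code lives in tc.c and is not shown in this hunk):

/* Hypothetical outline only: the first user creates the entry and the
 * firmware slot, later users take a reference, the last user frees both.
 */
static struct efx_tc_mac_pedit_action *
example_get_mac(struct efx_tc_mac_pedit_action *existing)
{
	if (existing) {
		refcount_inc(&existing->ref);
		return existing;
	}
	/* else: kzalloc(), write h_addr into the firmware MAC table to get
	 * fw_id, refcount_set(&ped->ref, 1), insert into efx->tc->mac_ht
	 */
	return NULL;
}

static void example_put_mac(struct efx_tc_mac_pedit_action *ped)
{
	if (refcount_dec_and_test(&ped->ref)) {
		/* remove from mac_ht, release the firmware slot, kfree(ped) */
	}
}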
+
static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
{
return !memchr_inv(addr, 0xff, sizeof(*addr));
}
-#endif
struct efx_tc_encap_action; /* see tc_encap_actions.h */
+/**
+ * struct efx_tc_action_set - collection of tc action fields
+ *
+ * @vlan_push: the number of vlan headers to push
+ * @vlan_pop: the number of vlan headers to pop
+ * @decap: used to indicate a tunnel header decapsulation should take place
+ * @do_ttl_dec: used to indicate IP TTL / Hop Limit should be decremented
+ * @deliver: used to indicate a deliver action should take place
+ * @vlan_tci: tci fields for vlan push actions
+ * @vlan_proto: ethernet types for vlan push actions
+ * @count: counter mapping
+ * @encap_md: encap entry in tc_encap_ht table
+ * @encap_user: linked list of encap users (encap_md->users)
+ * @user: owning action-set-list. Only populated if @encap_md is; used by efx_tc_update_encap() fallback handling
+ * @count_user: linked list of counter users (counter->users)
+ * @dest_mport: destination mport
+ * @src_mac: source mac entry in tc_mac_ht table
+ * @dst_mac: destination mac entry in tc_mac_ht table
+ * @fw_id: index of this entry in firmware actions table
+ * @list: linked list of tc actions
+ *
+ */
struct efx_tc_action_set {
u16 vlan_push:2;
u16 vlan_pop:2;
u16 decap:1;
+ u16 do_ttl_dec:1;
u16 deliver:1;
- __be16 vlan_tci[2]; /* TCIs for vlan_push */
- __be16 vlan_proto[2]; /* Ethertypes for vlan_push */
+ __be16 vlan_tci[2];
+ __be16 vlan_proto[2];
struct efx_tc_counter_index *count;
- struct efx_tc_encap_action *encap_md; /* entry in tc_encap_ht table */
- struct list_head encap_user; /* entry on encap_md->users list */
- struct efx_tc_action_set_list *user; /* Only populated if encap_md */
- struct list_head count_user; /* entry on counter->users list, if encap */
+ struct efx_tc_encap_action *encap_md;
+ struct list_head encap_user;
+ struct efx_tc_action_set_list *user;
+ struct list_head count_user;
u32 dest_mport;
- u32 fw_id; /* index of this entry in firmware actions table */
+ struct efx_tc_mac_pedit_action *src_mac;
+ struct efx_tc_mac_pedit_action *dst_mac;
+ u32 fw_id;
struct list_head list;
};
struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
- u8 recirc_id;
+ u8 recirc_id; /* mapped from (u32) TC chain_index to smaller space */
/* L2 (inner when encap) */
__be16 eth_proto;
__be16 vlan_tci[2], vlan_proto[2];
@@ -62,6 +102,7 @@ struct efx_tc_match_fields {
/* L4 */
__be16 l4_sport, l4_dport; /* Ports (UDP, TCP) */
__be16 tcp_flags;
+ bool tcp_syn_fin_rst; /* true if ANY of SYN/FIN/RST are set */
/* Encap. The following are *outer* fields. Note that there are no
* outer eth (L2) fields; this is because TC doesn't have them.
*/
@@ -70,6 +111,10 @@ struct efx_tc_match_fields {
u8 enc_ip_tos, enc_ip_ttl;
__be16 enc_sport, enc_dport;
__be32 enc_keyid; /* e.g. VNI, VSID */
+ /* Conntrack. */
+ u16 ct_state_trk:1, ct_state_est:1;
+ u32 ct_mark;
+ u16 ct_zone;
};
static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask)
@@ -117,10 +162,19 @@ struct efx_tc_encap_match {
struct efx_tc_encap_match *pseudo; /* Referenced pseudo EM if needed */
};
+struct efx_tc_recirc_id {
+ u32 chain_index;
+ struct net_device *net_dev;
+ struct rhash_head linkage;
+ refcount_t ref;
+ u8 fw_id; /* index allocated for use in the MAE */
+};
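TC's chain_index is a full u32 but the MAE recirculation ID is a single byte (hence the mask.recirc_id = 0xff above), so chain indices are mapped onto a small ID space via recirc_ida and shared by refcount when the same (chain_index, net_dev) pair recurs. A sketch of the allocation step; the exact bounds are an assumption here, with ID 0 reserved for chain 0:

#include <linux/idr.h>

/* Hypothetical core of the recirc ID allocation: hand out a byte-sized
 * hardware ID for a nonzero chain_index.  Bounds are illustrative.
 */
static int example_alloc_recirc_fw_id(struct ida *recirc_ida)
{
	/* start at 1: recirc_id 0 always means chain 0 */
	return ida_alloc_range(recirc_ida, 1, U8_MAX, GFP_USER);
}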
+
struct efx_tc_match {
struct efx_tc_match_fields value;
struct efx_tc_match_fields mask;
struct efx_tc_encap_match *encap;
+ struct efx_tc_recirc_id *rid;
};
struct efx_tc_action_set_list {
@@ -128,6 +182,12 @@ struct efx_tc_action_set_list {
u32 fw_id;
};
+struct efx_tc_lhs_action {
+ struct efx_tc_recirc_id *rid;
+ struct efx_tc_ct_zone *zone;
+ struct efx_tc_counter_index *count;
+};
+
struct efx_tc_flow_rule {
unsigned long cookie;
struct rhash_head linkage;
@@ -137,12 +197,62 @@ struct efx_tc_flow_rule {
u32 fw_id;
};
+struct efx_tc_lhs_rule {
+ unsigned long cookie;
+ struct efx_tc_match match;
+ struct efx_tc_lhs_action lhs_act;
+ struct rhash_head linkage;
+ u32 fw_id;
+};
+
enum efx_tc_rule_prios {
EFX_TC_PRIO_TC, /* Rule inserted by TC */
EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
EFX_TC_PRIO__NUM
};
+struct efx_tc_table_field_fmt {
+ u16 field_id;
+ u16 lbn;
+ u16 width;
+ u8 masking;
+ u8 scheme;
+};
+
+struct efx_tc_table_desc {
+ u16 type;
+ u16 key_width;
+ u16 resp_width;
+ u16 n_keys;
+ u16 n_resps;
+ u16 n_prios;
+ u8 flags;
+ u8 scheme;
+ struct efx_tc_table_field_fmt *keys;
+ struct efx_tc_table_field_fmt *resps;
+};
+
+struct efx_tc_table_ct { /* TABLE_ID_CONNTRACK_TABLE */
+ struct efx_tc_table_desc desc;
+ bool hooked;
+ struct { /* indices of named fields within @desc.keys */
+ u8 eth_proto_idx;
+ u8 ip_proto_idx;
+ u8 src_ip_idx; /* either v4 or v6 */
+ u8 dst_ip_idx;
+ u8 l4_sport_idx;
+ u8 l4_dport_idx;
+ u8 zone_idx; /* for TABLE_FIELD_ID_DOMAIN */
+ } keys;
+ struct { /* indices of named fields within @desc.resps */
+ u8 dnat_idx;
+ u8 nat_ip_idx;
+ u8 l4_natport_idx;
+ u8 mark_idx;
+ u8 counter_id_idx;
+ } resps;
+};
+
/**
* struct efx_tc_state - control plane data for TC offload
*
@@ -152,9 +262,16 @@ enum efx_tc_rule_prios {
* @counter_ht: Hashtable of TC counters (FW IDs and counter values)
* @counter_id_ht: Hashtable mapping TC counter cookies to counters
* @encap_ht: Hashtable of TC encap actions
+ * @mac_ht: Hashtable of MAC address entries (for pedits)
* @encap_match_ht: Hashtable of TC encap matches
* @match_action_ht: Hashtable of TC match-action rules
+ * @lhs_rule_ht: Hashtable of TC left-hand (act ct & goto chain) rules
+ * @ct_zone_ht: Hashtable of TC conntrack flowtable bindings
+ * @ct_ht: Hashtable of TC conntrack flow entries
* @neigh_ht: Hashtable of neighbour watches (&struct efx_neigh_binder)
+ * @recirc_ht: Hashtable of recirculation ID mappings (&struct efx_tc_recirc_id)
+ * @recirc_ida: Recirculation ID allocator
+ * @meta_ct: MAE table layout for conntrack table
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
@@ -183,9 +300,16 @@ struct efx_tc_state {
struct rhashtable counter_ht;
struct rhashtable counter_id_ht;
struct rhashtable encap_ht;
+ struct rhashtable mac_ht;
struct rhashtable encap_match_ht;
struct rhashtable match_action_ht;
+ struct rhashtable lhs_rule_ht;
+ struct rhashtable ct_zone_ht;
+ struct rhashtable ct_ht;
struct rhashtable neigh_ht;
+ struct rhashtable recirc_ht;
+ struct ida recirc_ida;
+ struct efx_tc_table_ct meta_ct;
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
bool flush_counters;
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
new file mode 100644
index 000000000000..8e06bfbcbea1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc_conntrack.h"
+#include "tc.h"
+#include "mae.h"
+
+static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
+static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
+ .key_len = offsetof(struct efx_tc_ct_zone, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_ct_zone, linkage),
+};
+
+static const struct rhashtable_params efx_tc_ct_ht_params = {
+ .key_len = offsetof(struct efx_tc_ct_entry, linkage),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_ct_entry, linkage),
+};
+
+static void efx_tc_ct_zone_free(void *ptr, void *arg)
+{
+ struct efx_tc_ct_zone *zone = ptr;
+ struct efx_nic *efx = zone->efx;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc ct_zone %u still present at teardown, removing\n",
+ zone->zone);
+
+ nf_flow_table_offload_del_cb(zone->nf_ft, efx_tc_flow_block, zone);
+ kfree(zone);
+}
+
+static void efx_tc_ct_free(void *ptr, void *arg)
+{
+ struct efx_tc_ct_entry *conn = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc ct_entry %lx still present at teardown\n",
+ conn->cookie);
+
+ /* We can release the counter, but we can't remove the CT itself
+ * from hardware because the table meta is already gone.
+ */
+ efx_tc_flower_release_counter(efx, conn->cnt);
+ kfree(conn);
+}
+
+int efx_tc_init_conntrack(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = rhashtable_init(&efx->tc->ct_zone_ht, &efx_tc_ct_zone_ht_params);
+ if (rc < 0)
+ goto fail_ct_zone_ht;
+ rc = rhashtable_init(&efx->tc->ct_ht, &efx_tc_ct_ht_params);
+ if (rc < 0)
+ goto fail_ct_ht;
+ return 0;
+fail_ct_ht:
+ rhashtable_destroy(&efx->tc->ct_zone_ht);
+fail_ct_zone_ht:
+ return rc;
+}
+
+/* Only call this in init failure teardown.
+ * Normal exit should fini instead as there may be entries in the table.
+ */
+void efx_tc_destroy_conntrack(struct efx_nic *efx)
+{
+ rhashtable_destroy(&efx->tc->ct_ht);
+ rhashtable_destroy(&efx->tc->ct_zone_ht);
+}
+
+void efx_tc_fini_conntrack(struct efx_nic *efx)
+{
+ rhashtable_free_and_destroy(&efx->tc->ct_zone_ht, efx_tc_ct_zone_free, NULL);
+ rhashtable_free_and_destroy(&efx->tc->ct_ht, efx_tc_ct_free, efx);
+}
+
+#define EFX_NF_TCP_FLAG(flg) cpu_to_be16(be32_to_cpu(TCP_FLAG_##flg) >> 16)
+
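The EFX_NF_TCP_FLAG() macro just defined converts the kernel's 32-bit TCP_FLAG_* constants, where the flag bits live in the upper 16 bits of the flag word, into the 16-bit network-order form that flow_match_tcp exposes. Expanded for SYN (using the standard TCP_FLAG_SYN == cpu_to_be32(0x00020000) definition from <net/tcp.h>):

/*
 *   be32_to_cpu(TCP_FLAG_SYN)         == 0x00020000
 *   be32_to_cpu(TCP_FLAG_SYN) >> 16   == 0x0002
 *   cpu_to_be16(0x0002)               == the SYN bit of the __be16
 *                                        fm.key->flags / fm.mask->flags
 *
 * i.e. the macro just moves each flag from the high half of the 32-bit
 * flag-word form into the raw 16-bit TCP header form used by the flow
 * dissector's TCP key.
 */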
+static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr,
+ struct efx_tc_ct_entry *conn)
+{
+ struct flow_dissector *dissector = fr->match.dissector;
+ unsigned char ipv = 0;
+ bool tcp = false;
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control fm;
+
+ flow_rule_match_control(fr, &fm);
+ if (IS_ALL_ONES(fm.mask->addr_type))
+ switch (fm.key->addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ ipv = 4;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ ipv = 6;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!ipv) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack missing ipv specification\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector->used_keys &
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_META))) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unsupported conntrack keys %#llx\n",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic fm;
+
+ flow_rule_match_basic(fr, &fm);
+ if (!IS_ALL_ONES(fm.mask->n_proto)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack eth_proto is not exact-match; mask %04x\n",
+ ntohs(fm.mask->n_proto));
+ return -EOPNOTSUPP;
+ }
+ conn->eth_proto = fm.key->n_proto;
+ if (conn->eth_proto != (ipv == 4 ? htons(ETH_P_IP)
+ : htons(ETH_P_IPV6))) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack eth_proto is not IPv%u, is %04x\n",
+ ipv, ntohs(conn->eth_proto));
+ return -EOPNOTSUPP;
+ }
+ if (!IS_ALL_ONES(fm.mask->ip_proto)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ip_proto is not exact-match; mask %02x\n",
+ fm.mask->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ conn->ip_proto = fm.key->ip_proto;
+ switch (conn->ip_proto) {
+ case IPPROTO_TCP:
+ tcp = true;
+ break;
+ case IPPROTO_UDP:
+ break;
+ default:
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ip_proto not TCP or UDP, is %02x\n",
+ conn->ip_proto);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack missing eth_proto, ip_proto\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ipv == 4 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs fm;
+
+ flow_rule_match_ipv4_addrs(fr, &fm);
+ if (!IS_ALL_ONES(fm.mask->src)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv4.src is not exact-match; mask %08x\n",
+ ntohl(fm.mask->src));
+ return -EOPNOTSUPP;
+ }
+ conn->src_ip = fm.key->src;
+ if (!IS_ALL_ONES(fm.mask->dst)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv4.dst is not exact-match; mask %08x\n",
+ ntohl(fm.mask->dst));
+ return -EOPNOTSUPP;
+ }
+ conn->dst_ip = fm.key->dst;
+ } else if (ipv == 6 && flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs fm;
+
+ flow_rule_match_ipv6_addrs(fr, &fm);
+ if (!efx_ipv6_addr_all_ones(&fm.mask->src)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv6.src is not exact-match; mask %pI6\n",
+ &fm.mask->src);
+ return -EOPNOTSUPP;
+ }
+ conn->src_ip6 = fm.key->src;
+ if (!efx_ipv6_addr_all_ones(&fm.mask->dst)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ipv6.dst is not exact-match; mask %pI6\n",
+ &fm.mask->dst);
+ return -EOPNOTSUPP;
+ }
+ conn->dst_ip6 = fm.key->dst;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack missing IPv%u addrs\n", ipv);
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports fm;
+
+ flow_rule_match_ports(fr, &fm);
+ if (!IS_ALL_ONES(fm.mask->src)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ports.src is not exact-match; mask %04x\n",
+ ntohs(fm.mask->src));
+ return -EOPNOTSUPP;
+ }
+ conn->l4_sport = fm.key->src;
+ if (!IS_ALL_ONES(fm.mask->dst)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack ports.dst is not exact-match; mask %04x\n",
+ ntohs(fm.mask->dst));
+ return -EOPNOTSUPP;
+ }
+ conn->l4_dport = fm.key->dst;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "Conntrack missing L4 ports\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(fr, FLOW_DISSECTOR_KEY_TCP)) {
+ __be16 tcp_interesting_flags;
+ struct flow_match_tcp fm;
+
+ if (!tcp) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Conntrack matching on TCP keys but ipproto is not tcp\n");
+ return -EOPNOTSUPP;
+ }
+ flow_rule_match_tcp(fr, &fm);
+ tcp_interesting_flags = EFX_NF_TCP_FLAG(SYN) |
+ EFX_NF_TCP_FLAG(RST) |
+ EFX_NF_TCP_FLAG(FIN);
+ /* If any of the tcp_interesting_flags is set, we always
+ * inhibit CT lookup in LHS (so SW can update CT table).
+ */
+ if (fm.key->flags & tcp_interesting_flags) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unsupported conntrack tcp.flags %04x/%04x\n",
+ ntohs(fm.key->flags), ntohs(fm.mask->flags));
+ return -EOPNOTSUPP;
+ }
+ /* Other TCP flags cannot be filtered at CT */
+ if (fm.mask->flags & ~tcp_interesting_flags) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unsupported conntrack tcp.flags %04x/%04x\n",
+ ntohs(fm.key->flags), ntohs(fm.mask->flags));
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
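The hardware conntrack table has no per-entry masking, which is why every key above is forced to be exact-match via IS_ALL_ONES() (tc.h) or efx_ipv6_addr_all_ones(). The all-ones test works for any unsigned field width because ~v is truncated back to the field's own type before the logical negation; a standalone illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t port_mask = 0xffff;	/* exact-match L4 port */
	uint32_t ip_mask = 0xffffff00;	/* a /24-style mask: not exact */

	assert(!(uint16_t)~port_mask);	/* ~0xffff truncates to 0 => all-ones */
	assert((uint32_t)~ip_mask);	/* leftover 0xff => not all-ones */
	return 0;
}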
+
+static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+ struct efx_tc_ct_entry *conn, *old;
+ struct efx_nic *efx = ct_zone->efx;
+ const struct flow_action_entry *fa;
+ struct efx_tc_counter *cnt;
+ int rc, i;
+
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+ if (WARN_ON(!efx->tc->up))
+ return -ENETDOWN;
+
+ conn = kzalloc(sizeof(*conn), GFP_USER);
+ if (!conn)
+ return -ENOMEM;
+ conn->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht,
+ &conn->linkage,
+ efx_tc_ct_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded conntrack (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ goto release;
+ }
+
+ /* Parse match */
+ conn->zone = ct_zone;
+ rc = efx_tc_ct_parse_match(efx, fr, conn);
+ if (rc)
+ goto release;
+
+ /* Parse actions */
+ flow_action_for_each(i, fa, &fr->action) {
+ switch (fa->id) {
+ case FLOW_ACTION_CT_METADATA:
+ conn->mark = fa->ct_metadata.mark;
+ if (memchr_inv(fa->ct_metadata.labels, 0, sizeof(fa->ct_metadata.labels))) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Setting CT label not supported\n");
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ break;
+ default:
+ netif_dbg(efx, drv, efx->net_dev,
+ "Unhandled action %u for conntrack\n", fa->id);
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ }
+
+ /* fill in defaults for unmangled values */
+ conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip;
+ conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport;
+
+ cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT);
+ if (IS_ERR(cnt)) {
+ rc = PTR_ERR(cnt);
+ goto release;
+ }
+ conn->cnt = cnt;
+
+ rc = efx_mae_insert_ct(efx, conn);
+ if (rc) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Failed to insert conntrack, %d\n", rc);
+ goto release;
+ }
+ mutex_lock(&ct_zone->mutex);
+ list_add_tail(&conn->list, &ct_zone->cts);
+ mutex_unlock(&ct_zone->mutex);
+ return 0;
+release:
+ if (conn->cnt)
+ efx_tc_flower_release_counter(efx, conn->cnt);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage,
+ efx_tc_ct_ht_params);
+ kfree(conn);
+ return rc;
+}
+
+/* Caller must follow with efx_tc_ct_remove_finish() after RCU grace period! */
+static void efx_tc_ct_remove(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ int rc;
+
+ /* Remove it from HW */
+ rc = efx_mae_remove_ct(efx, conn);
+ /* Delete it from SW */
+ rhashtable_remove_fast(&efx->tc->ct_ht, &conn->linkage,
+ efx_tc_ct_ht_params);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to remove conntrack %lx from hw, rc %d\n",
+ conn->cookie, rc);
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "Removed conntrack %lx\n",
+ conn->cookie);
+ }
+}
+
+static void efx_tc_ct_remove_finish(struct efx_nic *efx, struct efx_tc_ct_entry *conn)
+{
+ /* Remove the related CT counter. This is delayed until after the conn
+ * object we are working with has been successfully removed, which
+ * protects the counter from use-after-free inside efx_tc_ct_stats().
+ */
+ efx_tc_flower_release_counter(efx, conn->cnt);
+ kfree(conn);
+}
+
+static int efx_tc_ct_destroy(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct efx_nic *efx = ct_zone->efx;
+ struct efx_tc_ct_entry *conn;
+
+ conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
+ efx_tc_ct_ht_params);
+ if (!conn) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Conntrack %lx not found to remove\n", tc->cookie);
+ return -ENOENT;
+ }
+
+ mutex_lock(&ct_zone->mutex);
+ list_del(&conn->list);
+ efx_tc_ct_remove(efx, conn);
+ mutex_unlock(&ct_zone->mutex);
+ synchronize_rcu();
+ efx_tc_ct_remove_finish(efx, conn);
+ return 0;
+}
+
+static int efx_tc_ct_stats(struct efx_tc_ct_zone *ct_zone,
+ struct flow_cls_offload *tc)
+{
+ struct efx_nic *efx = ct_zone->efx;
+ struct efx_tc_ct_entry *conn;
+ struct efx_tc_counter *cnt;
+
+ rcu_read_lock();
+ conn = rhashtable_lookup_fast(&efx->tc->ct_ht, &tc->cookie,
+ efx_tc_ct_ht_params);
+ if (!conn) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Conntrack %lx not found for stats\n", tc->cookie);
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ cnt = conn->cnt;
+ spin_lock_bh(&cnt->lock);
+ /* Report only last use */
+ flow_stats_update(&tc->stats, 0, 0, 0, cnt->touched,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ spin_unlock_bh(&cnt->lock);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *tcb = type_data;
+ struct efx_tc_ct_zone *ct_zone = cb_priv;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ switch (tcb->command) {
+ case FLOW_CLS_REPLACE:
+ return efx_tc_ct_replace(ct_zone, tcb);
+ case FLOW_CLS_DESTROY:
+ return efx_tc_ct_destroy(ct_zone, tcb);
+ case FLOW_CLS_STATS:
+ return efx_tc_ct_stats(ct_zone, tcb);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
+ struct nf_flowtable *ct_ft)
+{
+ struct efx_tc_ct_zone *ct_zone, *old;
+ int rc;
+
+ ct_zone = kzalloc(sizeof(*ct_zone), GFP_USER);
+ if (!ct_zone)
+ return ERR_PTR(-ENOMEM);
+ ct_zone->zone = zone;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_zone_ht,
+ &ct_zone->linkage,
+ efx_tc_ct_zone_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(ct_zone);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found */
+ WARN_ON_ONCE(old->nf_ft != ct_ft);
+ netif_dbg(efx, drv, efx->net_dev,
+ "Found existing ct_zone for %u\n", zone);
+ return old;
+ }
+ ct_zone->nf_ft = ct_ft;
+ ct_zone->efx = efx;
+ INIT_LIST_HEAD(&ct_zone->cts);
+ mutex_init(&ct_zone->mutex);
+ rc = nf_flow_table_offload_add_cb(ct_ft, efx_tc_flow_block, ct_zone);
+ netif_dbg(efx, drv, efx->net_dev, "Adding new ct_zone for %u, rc %d\n",
+ zone, rc);
+ if (rc < 0)
+ goto fail;
+ refcount_set(&ct_zone->ref, 1);
+ return ct_zone;
+fail:
+ rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage,
+ efx_tc_ct_zone_ht_params);
+ kfree(ct_zone);
+ return ERR_PTR(rc);
+}
+
+void efx_tc_ct_unregister_zone(struct efx_nic *efx,
+ struct efx_tc_ct_zone *ct_zone)
+{
+ struct efx_tc_ct_entry *conn, *next;
+
+ if (!refcount_dec_and_test(&ct_zone->ref))
+ return; /* still in use */
+ nf_flow_table_offload_del_cb(ct_zone->nf_ft, efx_tc_flow_block, ct_zone);
+ rhashtable_remove_fast(&efx->tc->ct_zone_ht, &ct_zone->linkage,
+ efx_tc_ct_zone_ht_params);
+ mutex_lock(&ct_zone->mutex);
+ list_for_each_entry(conn, &ct_zone->cts, list)
+ efx_tc_ct_remove(efx, conn);
+ synchronize_rcu();
+ /* need to use _safe because efx_tc_ct_remove_finish() frees conn */
+ list_for_each_entry_safe(conn, next, &ct_zone->cts, list)
+ efx_tc_ct_remove_finish(efx, conn);
+ mutex_unlock(&ct_zone->mutex);
+ mutex_destroy(&ct_zone->mutex);
+ netif_dbg(efx, drv, efx->net_dev, "Removed ct_zone for %u\n",
+ ct_zone->zone);
+ kfree(ct_zone);
+}
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.h b/drivers/net/ethernet/sfc/tc_conntrack.h
new file mode 100644
index 000000000000..e75c8eb1965d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_conntrack.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_CONNTRACK_H
+#define EFX_TC_CONNTRACK_H
+#include "net_driver.h"
+
+#if IS_ENABLED(CONFIG_SFC_SRIOV)
+#include <linux/refcount.h>
+#include <net/netfilter/nf_flow_table.h>
+
+struct efx_tc_ct_zone {
+ u16 zone;
+ struct rhash_head linkage;
+ refcount_t ref;
+ struct nf_flowtable *nf_ft;
+ struct efx_nic *efx;
+ struct mutex mutex; /* protects cts list */
+ struct list_head cts; /* list of efx_tc_ct_entry in this zone */
+};
+
+/* create/uncreate/teardown hashtables */
+int efx_tc_init_conntrack(struct efx_nic *efx);
+void efx_tc_destroy_conntrack(struct efx_nic *efx);
+void efx_tc_fini_conntrack(struct efx_nic *efx);
+
+struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
+ struct nf_flowtable *ct_ft);
+void efx_tc_ct_unregister_zone(struct efx_nic *efx,
+ struct efx_tc_ct_zone *ct_zone);
+
+struct efx_tc_ct_entry {
+ unsigned long cookie;
+ struct rhash_head linkage;
+ __be16 eth_proto;
+ u8 ip_proto;
+ bool dnat;
+ __be32 src_ip, dst_ip, nat_ip;
+ struct in6_addr src_ip6, dst_ip6;
+ __be16 l4_sport, l4_dport, l4_natport; /* Ports (UDP, TCP) */
+ struct efx_tc_ct_zone *zone;
+ u32 mark;
+ struct efx_tc_counter *cnt;
+ struct list_head list; /* entry on zone->cts */
+};
+
+#endif /* CONFIG_SFC_SRIOV */
+#endif /* EFX_TC_CONNTRACK_H */
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
index 979f49058a0c..0fafb47ea082 100644
--- a/drivers/net/ethernet/sfc/tc_counters.c
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -129,8 +129,8 @@ static void efx_tc_counter_work(struct work_struct *work)
/* Counter allocation */
-static struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
- int type)
+struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
+ int type)
{
struct efx_tc_counter *cnt;
int rc, rc2;
@@ -169,8 +169,8 @@ fail1:
return ERR_PTR(rc > 0 ? -EIO : rc);
}
-static void efx_tc_flower_release_counter(struct efx_nic *efx,
- struct efx_tc_counter *cnt)
+void efx_tc_flower_release_counter(struct efx_nic *efx,
+ struct efx_tc_counter *cnt)
{
int rc;
diff --git a/drivers/net/ethernet/sfc/tc_counters.h b/drivers/net/ethernet/sfc/tc_counters.h
index 41e57f34b763..f18d71c13600 100644
--- a/drivers/net/ethernet/sfc/tc_counters.h
+++ b/drivers/net/ethernet/sfc/tc_counters.h
@@ -49,6 +49,10 @@ int efx_tc_init_counters(struct efx_nic *efx);
void efx_tc_destroy_counters(struct efx_nic *efx);
void efx_tc_fini_counters(struct efx_nic *efx);
+struct efx_tc_counter *efx_tc_flower_allocate_counter(struct efx_nic *efx,
+ int type);
+void efx_tc_flower_release_counter(struct efx_nic *efx,
+ struct efx_tc_counter *cnt);
struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
struct efx_nic *efx, unsigned long cookie,
enum efx_tc_counter_type type);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 4ed4082836a9..fe2d476028e7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -517,13 +517,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
unsigned index, type;
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
-
index = skb_get_queue_mapping(skb);
type = efx_tx_csum_type_skb(skb);
- if (index >= efx->n_tx_channels) {
- index -= efx->n_tx_channels;
- type |= EFX_TXQ_TYPE_HIGHPRI;
- }
/* PTP "event" packet */
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
@@ -603,43 +598,5 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
/* Must be inverse of queue lookup in efx_hard_start_xmit() */
tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev,
- tx_queue->channel->channel +
- ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
- efx->n_tx_channels : 0));
-}
-
-int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- void *type_data)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct tc_mqprio_qopt *mqprio = type_data;
- unsigned tc, num_tc;
-
- if (type != TC_SETUP_QDISC_MQPRIO)
- return -EOPNOTSUPP;
-
- /* Only Siena supported highpri queues */
- if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
- return -EOPNOTSUPP;
-
- num_tc = mqprio->num_tc;
-
- if (num_tc > EFX_MAX_TX_TC)
- return -EINVAL;
-
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
-
- if (num_tc == net_dev->num_tc)
- return 0;
-
- for (tc = 0; tc < num_tc; tc++) {
- net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
- net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
- }
-
- net_dev->num_tc = num_tc;
-
- return netif_set_real_num_tx_queues(net_dev,
- max_t(int, num_tc, 1) *
- efx->n_tx_channels);
+ tx_queue->channel->channel);
}
diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c
index d381d8164f07..64a6768f75ea 100644
--- a/drivers/net/ethernet/sfc/tx_tso.c
+++ b/drivers/net/ethernet/sfc/tx_tso.c
@@ -85,7 +85,7 @@ static inline void prefetch_ptr(struct efx_tx_queue *tx_queue)
prefetch(ptr);
prefetch(ptr + 0x80);
- ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr);
+ ptr = (char *)(((efx_qword_t *)tx_queue->txd.addr) + insert_ptr);
prefetch(ptr);
prefetch(ptr + 0x80);
}
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
deleted file mode 100644
index 480b872eb4d1..000000000000
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ /dev/null
@@ -1,252 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/****************************************************************************
- * Driver for Solarflare network controllers and boards
- * Copyright 2010-2012 Solarflare Communications Inc.
- */
-#ifndef _VFDI_H
-#define _VFDI_H
-
-/**
- * DOC: Virtual Function Driver Interface
- *
- * This file contains software structures used to form a two way
- * communication channel between the VF driver and the PF driver,
- * named Virtual Function Driver Interface (VFDI).
- *
- * For the purposes of VFDI, a page is a memory region with size and
- * alignment of 4K. All addresses are DMA addresses to be used within
- * the domain of the relevant VF.
- *
- * The only hardware-defined channels for a VF driver to communicate
- * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
- * registers). Writing to these registers generates an event with
- * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
- * and USER_EV_REG_VALUE set to the value written. The PF driver may
- * direct or disable delivery of these events by setting
- * %FR_CZ_USR_EV_CFG.
- *
- * The PF driver can send arbitrary events to arbitrary event queues.
- * However, for consistency, VFDI events from the PF are defined to
- * follow the same form and be sent to the first event queue assigned
- * to the VF while that queue is enabled by the VF driver.
- *
- * The general form of the variable bits of VFDI events is:
- *
- * 0 16 24 31
- * | DATA | TYPE | SEQ |
- *
- * SEQ is a sequence number which should be incremented by 1 (modulo
- * 256) for each event. The sequence numbers used in each direction
- * are independent.
- *
- * The VF submits requests of type &struct vfdi_req by sending the
- * address of the request (ADDR) in a series of 4 events:
- *
- * 0 16 24 31
- * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ |
- * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
- * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
- * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
- *
- * The address must be page-aligned. After receiving such a valid
- * series of events, the PF driver will attempt to read the request
- * and write a response to the same address. In case of an invalid
- * sequence of events or a DMA error, there will be no response.
- *
- * The VF driver may request that the PF driver writes status
- * information into its domain asynchronously. After writing the
- * status, the PF driver will send an event of the form:
- *
- * 0 16 24 31
- * | reserved | VFDI_EV_TYPE_STATUS | SEQ |
- *
- * In case the VF must be reset for any reason, the PF driver will
- * send an event of the form:
- *
- * 0 16 24 31
- * | reserved | VFDI_EV_TYPE_RESET | SEQ |
- *
- * It is then the responsibility of the VF driver to request
- * reinitialisation of its queues.
- */
-#define VFDI_EV_SEQ_LBN 24
-#define VFDI_EV_SEQ_WIDTH 8
-#define VFDI_EV_TYPE_LBN 16
-#define VFDI_EV_TYPE_WIDTH 8
-#define VFDI_EV_TYPE_REQ_WORD0 0
-#define VFDI_EV_TYPE_REQ_WORD1 1
-#define VFDI_EV_TYPE_REQ_WORD2 2
-#define VFDI_EV_TYPE_REQ_WORD3 3
-#define VFDI_EV_TYPE_STATUS 4
-#define VFDI_EV_TYPE_RESET 5
-#define VFDI_EV_DATA_LBN 0
-#define VFDI_EV_DATA_WIDTH 16
-
-struct vfdi_endpoint {
- u8 mac_addr[ETH_ALEN];
- __be16 tci;
-};
-
-/**
- * enum vfdi_op - VFDI operation enumeration
- * @VFDI_OP_RESPONSE: Indicates a response to the request.
- * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
- * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
- * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
- * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
- * finalize the SRAM entries.
- * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
- * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
- * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
- * from PF and write the initial status.
- * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
- * updates from PF.
- */
-enum vfdi_op {
- VFDI_OP_RESPONSE = 0,
- VFDI_OP_INIT_EVQ = 1,
- VFDI_OP_INIT_RXQ = 2,
- VFDI_OP_INIT_TXQ = 3,
- VFDI_OP_FINI_ALL_QUEUES = 4,
- VFDI_OP_INSERT_FILTER = 5,
- VFDI_OP_REMOVE_ALL_FILTERS = 6,
- VFDI_OP_SET_STATUS_PAGE = 7,
- VFDI_OP_CLEAR_STATUS_PAGE = 8,
- VFDI_OP_LIMIT,
-};
-
-/* Response codes for VFDI operations. Other values may be used in future. */
-#define VFDI_RC_SUCCESS 0
-#define VFDI_RC_ENOMEM (-12)
-#define VFDI_RC_EINVAL (-22)
-#define VFDI_RC_EOPNOTSUPP (-95)
-#define VFDI_RC_ETIMEDOUT (-110)
-
-/**
- * struct vfdi_req - Request from VF driver to PF driver
- * @op: Operation code or response indicator, taken from &enum vfdi_op.
- * @rc: Response code. Set to 0 on success or a negative error code on failure.
- * @u.init_evq.index: Index of event queue to create.
- * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
- * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
- * address of each page backing the event queue.
- * @u.init_rxq.index: Index of receive queue to create.
- * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
- * @u.init_rxq.evq: Instance of event queue to target receive events at.
- * @u.init_rxq.label: Label used in receive events.
- * @u.init_rxq.flags: Unused.
- * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
- * address of each page backing the receive queue.
- * @u.init_txq.index: Index of transmit queue to create.
- * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
- * @u.init_txq.evq: Instance of event queue to target transmit completion
- * events at.
- * @u.init_txq.label: Label used in transmit completion events.
- * @u.init_txq.flags: Checksum offload flags.
- * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
- * address of each page backing the transmit queue.
- * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
- * all traffic at this receive queue.
- * @u.mac_filter.flags: MAC filter flags.
- * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
- * This address must be page-aligned and the PF may write up to a
- * whole page (allowing for extension of the structure).
- * @u.set_status_page.peer_page_count: Number of additional pages the VF
- * has provided into which peer addresses may be DMAd.
- * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
- * If the number of peers exceeds 256, then the VF must provide
- * additional pages in this array. The PF will then DMA up to
- * 512 vfdi_endpoint structures into each page. These addresses
- * must be page-aligned.
- */
-struct vfdi_req {
- u32 op;
- u32 reserved1;
- s32 rc;
- u32 reserved2;
- union {
- struct {
- u32 index;
- u32 buf_count;
- u64 addr[];
- } init_evq;
- struct {
- u32 index;
- u32 buf_count;
- u32 evq;
- u32 label;
- u32 flags;
-#define VFDI_RXQ_FLAG_SCATTER_EN 1
- u32 reserved;
- u64 addr[];
- } init_rxq;
- struct {
- u32 index;
- u32 buf_count;
- u32 evq;
- u32 label;
- u32 flags;
-#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
-#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
- u32 reserved;
- u64 addr[];
- } init_txq;
- struct {
- u32 rxq;
- u32 flags;
-#define VFDI_MAC_FILTER_FLAG_RSS 1
-#define VFDI_MAC_FILTER_FLAG_SCATTER 2
- } mac_filter;
- struct {
- u64 dma_addr;
- u64 peer_page_count;
- u64 peer_page_addr[];
- } set_status_page;
- } u;
-};
-
-/**
- * struct vfdi_status - Status provided by PF driver to VF driver
- * @generation_start: A generation count DMA'd to VF *before* the
- * rest of the structure.
- * @generation_end: A generation count DMA'd to VF *after* the
- * rest of the structure.
- * @version: Version of this structure; currently set to 1. Later
- * versions must either be layout-compatible or only be sent to VFs
- * that specifically request them.
- * @length: Total length of this structure including embedded tables
- * @vi_scale: log2 the number of VIs available on this VF. This quantity
- * is used by the hardware for register decoding.
- * @max_tx_channels: The maximum number of transmit queues the VF can use.
- * @rss_rxq_count: The number of receive queues present in the shared RSS
- * indirection table.
- * @peer_count: Total number of peers in the complete peer list. If larger
- * than ARRAY_SIZE(%peers), then the VF must provide sufficient
- * additional pages each of which is filled with vfdi_endpoint structures.
- * @local: The MAC address and outer VLAN tag of *this* VF
- * @peers: Table of peer addresses. The @tci fields in these structures
- * are currently unused and must be ignored. Additional peers are
- * written into any additional pages provided by the VF.
- * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
- * for interrupt moderation timers, in nanoseconds. This member is only
- * present if @length is sufficiently large.
- */
-struct vfdi_status {
- u32 generation_start;
- u32 generation_end;
- u32 version;
- u32 length;
- u8 vi_scale;
- u8 max_tx_channels;
- u8 rss_rxq_count;
- u8 reserved1;
- u16 peer_count;
- u16 reserved2;
- struct vfdi_endpoint local;
- struct vfdi_endpoint peers[256];
-
- /* Members below here extend version 1 of this structure */
- u32 timer_quantum_ns;
-};
-
-#endif
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index 815be2d20c4b..e10e7f84958d 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -12,14 +12,7 @@
* Bug numbers are from Solarflare's Bugzilla.
*/
-#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
-#define EFX_WORKAROUND_10G(efx) 1
-
-/* Bit-bashed I2C reads cause performance drop */
-#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* Legacy interrupt storm when interrupt fifo fills */
-#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
/* Lockup when writing event block registers at gen2/gen3 */
#define EFX_EF10_WORKAROUND_35388(efx) \
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 174dc8908b72..cb590db625e8 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -43,7 +43,6 @@
#include <linux/smsc911x.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/acpi.h>
@@ -552,7 +551,7 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
/* Get a phy register */
static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
- struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
+ struct smsc911x_data *pdata = bus->priv;
unsigned long flags;
unsigned int addr;
int i, reg;
@@ -591,7 +590,7 @@ out:
static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
u16 val)
{
- struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
+ struct smsc911x_data *pdata = bus->priv;
unsigned long flags;
unsigned int addr;
int i, reg;
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 71fbb358bb7d..e1c4a11c1f18 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -102,7 +102,7 @@ static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
- struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
+ struct smsc9420_pdata *pd = bus->priv;
unsigned long flags;
u32 addr;
int i, reg = -EIO;
@@ -140,7 +140,7 @@ out:
static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
u16 val)
{
- struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
+ struct smsc9420_pdata *pd = bus->priv;
unsigned long flags;
u32 addr;
int i, reg = -EIO;
@@ -1144,8 +1144,7 @@ static int smsc9420_mii_init(struct net_device *dev)
goto err_out_1;
}
pd->mii_bus->name = DRV_MDIONAME;
- snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x",
- (pd->pdev->bus->number << 8) | pd->pdev->devfn);
+ snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(pd->pdev));
pd->mii_bus->priv = pd;
pd->mii_bus->read = smsc9420_mii_read;
pd->mii_bus->write = smsc9420_mii_write;
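
Note: the smsc9420 hunk replaces the open-coded bus/devfn composition with pci_dev_id(). A minimal sketch of the equivalence, assuming only <linux/pci.h>; the function name is illustrative:

#include <linux/pci.h>

static u16 example_mdio_bus_id(struct pci_dev *pdev)
{
	/* Equivalent to the previous open-coded
	 * (pdev->bus->number << 8) | pdev->devfn
	 */
	return pci_dev_id(pdev);
}
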
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 0dcd6a568b06..f358ea003193 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -15,7 +15,7 @@
#include <linux/bpf_trace.h>
#include <net/tcp.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <net/ip6_checksum.h>
#define NETSEC_REG_SOFT_RST 0x104
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 492c39c08af1..4838d2383a43 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -15,10 +15,11 @@
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/types.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 5583f0b055ec..06c6871f8788 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -121,17 +121,6 @@ config DWMAC_MESON
the stmmac device driver. This driver is used for Meson6,
Meson8, Meson8b and GXBB SoCs.
-config DWMAC_OXNAS
- tristate "Oxford Semiconductor OXNAS dwmac support"
- default ARCH_OXNAS
- depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST)
- select MFD_SYSCON
- help
- Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs.
-
- This selects the Oxford Semiconductor OXNASSoC glue layer support for
- the stmmac device driver. This driver is used for OX820.
-
config DWMAC_QCOM_ETHQOS
tristate "Qualcomm ETHQOS support"
default ARCH_QCOM
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 7dd3d388068b..5b57aee19267 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
-obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 16e67c18b6f7..403cb397d4d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -35,6 +35,7 @@
#define DWMAC_CORE_5_10 0x51
#define DWMAC_CORE_5_20 0x52
#define DWXGMAC_CORE_2_10 0x21
+#define DWXGMAC_CORE_2_20 0x22
#define DWXLGMAC_CORE_2_00 0x20
/* Device ID */
@@ -59,13 +60,25 @@
/* #define FRAME_FILTER_DEBUG */
struct stmmac_txq_stats {
- unsigned long tx_pkt_n;
- unsigned long tx_normal_irq_n;
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_pkt_n;
+ u64 tx_normal_irq_n;
+ u64 napi_poll;
+ u64 tx_clean;
+ u64 tx_set_ic_bit;
+ u64 tx_tso_frames;
+ u64 tx_tso_nfrags;
+ struct u64_stats_sync syncp;
};
struct stmmac_rxq_stats {
- unsigned long rx_pkt_n;
- unsigned long rx_normal_irq_n;
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_pkt_n;
+ u64 rx_normal_irq_n;
+ u64 napi_poll;
+ struct u64_stats_sync syncp;
};
/* Extra statistic and debug information exposed by ethtool */
@@ -81,6 +94,7 @@ struct stmmac_extra_stats {
unsigned long tx_frame_flushed;
unsigned long tx_payload_error;
unsigned long tx_ip_header_error;
+ unsigned long tx_collision;
/* Receive errors */
unsigned long rx_desc;
unsigned long sa_filter_fail;
@@ -113,14 +127,6 @@ struct stmmac_extra_stats {
/* Tx/Rx IRQ Events */
unsigned long rx_early_irq;
unsigned long threshold;
- unsigned long tx_pkt_n;
- unsigned long rx_pkt_n;
- unsigned long normal_irq_n;
- unsigned long rx_normal_irq_n;
- unsigned long napi_poll;
- unsigned long tx_normal_irq_n;
- unsigned long tx_clean;
- unsigned long tx_set_ic_bit;
unsigned long irq_receive_pmt_irq_n;
/* MMC info */
unsigned long mmc_tx_irq_n;
@@ -190,18 +196,16 @@ struct stmmac_extra_stats {
unsigned long mtl_rx_fifo_ctrl_active;
unsigned long mac_rx_frame_ctrl_fifo;
unsigned long mac_gmii_rx_proto_engine;
- /* TSO */
- unsigned long tx_tso_frames;
- unsigned long tx_tso_nfrags;
/* EST */
unsigned long mtl_est_cgce;
unsigned long mtl_est_hlbs;
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
- /* per queue statistics */
- struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
- struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ unsigned long rx_dropped;
+ unsigned long rx_errors;
+ unsigned long tx_dropped;
+ unsigned long tx_errors;
};
/* Safety Feature statistics exposed by ethtool */
@@ -406,6 +410,18 @@ struct dma_features {
unsigned int number_tx_queues;
/* PPS output */
unsigned int pps_out_num;
+ /* Number of Traffic Classes */
+ unsigned int numtc;
+ /* DCB Feature Enable */
+ unsigned int dcben;
+ /* IEEE 1588 High Word Register Enable */
+ unsigned int advthword;
+ /* PTP Offload Enable */
+ unsigned int ptoen;
+ /* One-Step Timestamping Enable */
+ unsigned int osten;
+ /* Priority-Based Flow Control Enable */
+ unsigned int pfcen;
/* Alternate (enhanced) DESC mode */
unsigned int enh_desc;
/* TX and RX FIFO sizes */
@@ -426,14 +442,40 @@ struct dma_features {
unsigned int dvlan;
unsigned int l3l4fnum;
unsigned int arpoffsel;
+ /* One Step for PTP over UDP/IP Feature Enable */
+ unsigned int pou_ost_en;
+ /* Tx Timestamp FIFO Depth */
+ unsigned int ttsfd;
+ /* Queue/Channel-Based VLAN tag insertion on Tx */
+ unsigned int cbtisel;
+ /* Supported Parallel Instruction Processor Engines */
+ unsigned int frppipe_num;
+ /* Number of Extended VLAN Tag Filters */
+ unsigned int nrvf_num;
/* TSN Features */
unsigned int estwid;
unsigned int estdep;
unsigned int estsel;
unsigned int fpesel;
unsigned int tbssel;
+ /* Number of DMA channels enabled for TBS */
+ unsigned int tbs_ch_num;
+ /* Per-Stream Filtering Enable */
+ unsigned int sgfsel;
/* Numbers of Auxiliary Snapshot Inputs */
unsigned int aux_snapshot_n;
+ /* Timestamp System Time Source */
+ unsigned int tssrc;
+ /* Enhanced DMA Enable */
+ unsigned int edma;
+ /* Different Descriptor Cache Enable */
+ unsigned int ediffc;
+ /* VxLAN/NVGRE Enable */
+ unsigned int vxn;
+ /* Debug Memory Interface Enable */
+ unsigned int dbgmem;
+ /* Number of Policing Counters */
+ unsigned int pcsel;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
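
Note: the common.h hunk above replaces the old global tx/rx counters with per-queue u64 fields guarded by a u64_stats_sync, so 32-bit readers can take a consistent snapshot without making every fast-path increment atomic. A minimal sketch of the writer/reader pairing under that assumption; the structure and field names below are placeholders, not the driver's own:

#include <linux/u64_stats_sync.h>

struct example_txq_stats {
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;	/* u64_stats_init() assumed at queue setup */
};

/* Writer side: one writer per queue, e.g. the TX completion path. */
static void example_txq_account(struct example_txq_stats *s, unsigned int bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retries if it raced with a writer on 32-bit builds. */
static void example_txq_read(struct example_txq_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->tx_packets;
		*bytes = s->tx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
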
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 9f88530c5e8c..61ebf36da13d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -14,7 +14,7 @@
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
@@ -113,7 +113,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
/* dwc-qos needs GMAC4, AAL, TSO and PMT */
plat_dat->has_gmac4 = 1;
plat_dat->dma_cfg->aal = 1;
- plat_dat->tso_en = 1;
+ plat_dat->flags |= STMMAC_FLAG_TSO_EN;
plat_dat->pmt = 1;
return 0;
@@ -178,7 +178,7 @@ static void dwc_qos_remove(struct platform_device *pdev)
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
+static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct tegra_eqos *eqos = priv;
unsigned long rate = 125000000;
@@ -359,7 +359,7 @@ bypass_clk_reset_gpio:
data->fix_mac_speed = tegra_eqos_fix_speed;
data->init = tegra_eqos_init;
data->bsp_priv = eqos;
- data->sph_disable = 1;
+ data->flags |= STMMAC_FLAG_SPH_DISABLE;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
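
Note: several hunks in this series (tso_en, sph_disable, use_phy_wol, ...) replace individual boolean fields in plat_stmmacenet_data with bits in a single flags word. A hedged sketch of the pattern; the flag values and structure below are illustrative, the real definitions live in include/linux/stmmac.h:

#include <linux/bits.h>

/* Illustrative values only; not the real STMMAC_FLAG_* definitions. */
#define EXAMPLE_FLAG_TSO_EN		BIT(0)
#define EXAMPLE_FLAG_SPH_DISABLE	BIT(1)

struct example_plat {
	unsigned long flags;
};

static void example_enable_tso(struct example_plat *plat)
{
	plat->flags |= EXAMPLE_FLAG_TSO_EN;		/* set */
}

static void example_disable_tso(struct example_plat *plat)
{
	plat->flags &= ~EXAMPLE_FLAG_TSO_EN;		/* clear */
}

static bool example_sph_allowed(const struct example_plat *plat)
{
	return !(plat->flags & EXAMPLE_FLAG_SPH_DISABLE);	/* test */
}
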
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index b9378a63f0e8..df34e34cc14f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -12,7 +12,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -32,6 +31,7 @@
#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0)
+#define MX93_GPR_ENET_QOS_INTF_MASK GENMASK(3, 1)
#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1)
#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
@@ -40,13 +40,16 @@
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
#define RMII_RESET_SPEED (0x3 << 14)
+#define CTRL_SPEED_MASK GENMASK(15, 14)
struct imx_dwmac_ops {
u32 addr_width;
+ u32 flags;
bool mac_rgmii_txclk_auto_adj;
int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
+ void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
};
struct imx_priv_data {
@@ -56,6 +59,7 @@ struct imx_priv_data {
struct regmap *intf_regmap;
u32 intf_reg_off;
bool rmii_refclk_ext;
+ void __iomem *base_addr;
const struct imx_dwmac_ops *ops;
struct plat_stmmacenet_data *plat_dat;
@@ -66,7 +70,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -83,7 +87,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("imx dwmac doesn't support %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
@@ -106,7 +110,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -121,7 +125,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
@@ -178,7 +182,7 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
/* nothing to do now */
}
-static void imx_dwmac_fix_speed(void *priv, unsigned int speed)
+static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
@@ -188,8 +192,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed)
plat_dat = dwmac->plat_dat;
if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
- (plat_dat->interface == PHY_INTERFACE_MODE_RMII) ||
- (plat_dat->interface == PHY_INTERFACE_MODE_MII))
+ (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
return;
switch (speed) {
@@ -212,6 +216,41 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed)
dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
}
+static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+{
+ struct imx_priv_data *dwmac = priv;
+ unsigned int iface;
+ int ctrl, old_ctrl;
+
+ imx_dwmac_fix_speed(priv, speed, mode);
+
+ if (!dwmac || mode != MLO_AN_FIXED)
+ return;
+
+ if (regmap_read(dwmac->intf_regmap, dwmac->intf_reg_off, &iface))
+ return;
+
+ iface &= MX93_GPR_ENET_QOS_INTF_MASK;
+ if (iface != MX93_GPR_ENET_QOS_INTF_SEL_RGMII)
+ return;
+
+ old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~CTRL_SPEED_MASK;
+ regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
+ MX93_GPR_ENET_QOS_INTF_MODE_MASK, 0);
+ writel(ctrl, dwmac->base_addr + MAC_CTRL_REG);
+
+ /* Ensure the settings for CTRL are applied. */
+ readl(dwmac->base_addr + MAC_CTRL_REG);
+
+ usleep_range(10, 20);
+ iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
+ MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface);
+
+ writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG);
+}
+
static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
{
struct plat_stmmacenet_data *plat_dat = priv;
@@ -221,7 +260,7 @@ static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
+ if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
usleep_range(100, 200);
writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG);
}
@@ -312,6 +351,9 @@ static int imx_dwmac_probe(struct platform_device *pdev)
goto err_parse_dt;
}
+ if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
+ plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+
plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
@@ -319,6 +361,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
+ dwmac->base_addr = stmmac_res.addr;
ret = imx_dwmac_clks_config(dwmac, true);
if (ret)
@@ -328,6 +371,8 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_dwmac_init;
+ if (dwmac->ops->fix_mac_speed)
+ plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
@@ -351,6 +396,7 @@ static struct imx_dwmac_ops imx8mp_dwmac_data = {
.addr_width = 34,
.mac_rgmii_txclk_auto_adj = false,
.set_intf_mode = imx8mp_set_intf_mode,
+ .flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY,
};
static struct imx_dwmac_ops imx8dxl_dwmac_data = {
@@ -364,6 +410,7 @@ static struct imx_dwmac_ops imx93_dwmac_data = {
.mac_rgmii_txclk_auto_adj = true,
.set_intf_mode = imx93_set_intf_mode,
.fix_soc_reset = imx_dwmac_mx93_reset,
+ .fix_mac_speed = imx93_dwmac_fix_speed,
};
static const struct of_device_id imx_dwmac_match[] = {
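
Note: imx93_dwmac_fix_speed() above only acts when the phylink negotiation mode is MLO_AN_FIXED, which is what the new third argument to fix_mac_speed() carries. A minimal sketch of a glue callback using that parameter; the callback name and the empty workaround body are placeholders:

#include <linux/ethtool.h>
#include <linux/phylink.h>

/* Hypothetical glue callback: only retune clocks for fixed-link setups. */
static void example_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
	if (mode != MLO_AN_FIXED)
		return;		/* in-band / PHY-managed links need no fixup here */

	switch (speed) {
	case SPEED_1000:
	case SPEED_100:
	case SPEED_10:
		/* platform-specific TX clock adjustment would go here */
		break;
	default:
		break;
	}
}
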
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 8063ba1c3ce8..0a20c3d24722 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -11,7 +11,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -90,7 +89,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
@@ -119,7 +118,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -131,13 +130,13 @@ static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct ingenic_mac *mac = plat_dat->bsp_priv;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -150,14 +149,14 @@ static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -170,7 +169,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
@@ -178,7 +177,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -191,7 +190,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
@@ -221,7 +220,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index a5e639ab0b9e..d352a14f9d48 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -22,13 +22,13 @@ struct intel_dwmac {
};
struct intel_dwmac_data {
- void (*fix_mac_speed)(void *priv, unsigned int speed);
+ void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
unsigned long ptp_ref_clk_rate;
unsigned long tx_clk_rate;
bool tx_clk_en;
};
-static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed)
+static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct intel_dwmac *dwmac = priv;
unsigned long rate;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index ab9f876b6df7..a3a249c63598 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -257,9 +257,8 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
/* Program PTP Clock Frequency for different variant of
* Intel mGBE that has slightly different GPO mapping
*/
-static void intel_mgbe_ptp_clk_freq_config(void *npriv)
+static void intel_mgbe_ptp_clk_freq_config(struct stmmac_priv *priv)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
struct intel_priv_data *intel_priv;
u32 gpio_value;
@@ -326,10 +325,10 @@ static int intel_crosststamp(ktime_t *device,
/* Both internal crosstimestamping and external triggered event
* timestamping cannot be run concurrently.
*/
- if (priv->plat->ext_snapshot_en)
+ if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
return -EBUSY;
- priv->plat->int_snapshot_en = 1;
+ priv->plat->flags |= STMMAC_FLAG_INT_SNAPSHOT_EN;
mutex_lock(&priv->aux_ts_lock);
/* Enable Internal snapshot trigger */
@@ -350,7 +349,7 @@ static int intel_crosststamp(ktime_t *device,
break;
default:
mutex_unlock(&priv->aux_ts_lock);
- priv->plat->int_snapshot_en = 0;
+ priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return -EINVAL;
}
writel(acr_value, ptpaddr + PTP_ACR);
@@ -376,7 +375,7 @@ static int intel_crosststamp(ktime_t *device,
if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
stmmac_cross_ts_isr(priv),
HZ / 100)) {
- priv->plat->int_snapshot_en = 0;
+ priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return -ETIMEDOUT;
}
@@ -395,7 +394,7 @@ static int intel_crosststamp(ktime_t *device,
}
system->cycles *= intel_priv->crossts_adj;
- priv->plat->int_snapshot_en = 0;
+ priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
}
@@ -458,8 +457,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->has_gmac = 0;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
- plat->tso_en = 1;
- plat->sph_disable = 1;
+ plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
/* Multiplying factor to the clk_eee_i clock time
* period to make it closer to 100 ns. This value
@@ -561,7 +559,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
- plat->vlan_fail_q_en = true;
+ plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
/* Use the last Rx queue */
plat->vlan_fail_q = plat->rx_queues_to_use - 1;
@@ -610,7 +608,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->ext_snapshot_num = AUX_SNAPSHOT0;
plat->crosststamp = intel_crosststamp;
- plat->int_snapshot_en = 0;
+ plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
@@ -628,7 +626,8 @@ static int ehl_common_data(struct pci_dev *pdev,
{
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
- plat->use_phy_wol = 1;
+ plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
+ plat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 1;
@@ -954,7 +953,7 @@ static int stmmac_config_single_msi(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, 0);
res->wol_irq = res->irq;
- plat->multi_msi_en = 0;
+ plat->flags &= ~STMMAC_FLAG_MULTI_MSI_EN;
dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
__func__);
@@ -1006,7 +1005,7 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
- plat->multi_msi_en = 1;
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index e39406df8516..9b0200749109 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -257,7 +257,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
}
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct ipq806x_gmac *gmac = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index a25c187d3185..2cd6fce5c993 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -117,7 +117,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
}
plat->phy_interface = phy_mode;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->mac_interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);
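
Note: the loongson hunk keeps two distinct fields: phy_interface is what the PHY link actually uses (taken from firmware), while the renamed mac_interface describes the MAC-side configuration (fixed to GMII here). A minimal sketch of filling the PHY-side value from the device tree; the helper name is made up:

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>

static int example_read_phy_mode(struct device_node *np,
				 phy_interface_t *phy_mode)
{
	/* Parses the "phy-mode"/"phy-connection-type" property;
	 * returns a negative errno (e.g. -ENODEV) when it is missing.
	 */
	return of_get_phy_mode(np, phy_mode);
}
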
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 18e84ba693a6..d0aa674ce705 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -50,9 +50,9 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 73c1dfa7ecb1..cd796ec04132 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -7,8 +7,8 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/stmmac.h>
@@ -587,8 +587,11 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
- plat->interface = priv_plat->phy_mode;
- plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ plat->mac_interface = priv_plat->phy_mode;
+ if (priv_plat->mac_wol)
+ plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
+ else
+ plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
plat->host_dma_width = priv_plat->variant->dma_bit_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 7aa5e6bc04eb..959f88c6da16 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -22,7 +22,7 @@ struct meson_dwmac {
void __iomem *reg;
};
-static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct meson_dwmac *dwmac = priv;
unsigned int val;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 92b16048f91c..0b159dc0d5f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
deleted file mode 100644
index 42954020de2c..000000000000
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ /dev/null
@@ -1,245 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Oxford Semiconductor OXNAS DWMAC glue layer
- *
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Daniel Golle <daniel@makrotopia.org>
- * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-#include <linux/stmmac.h>
-
-#include "stmmac_platform.h"
-
-/* System Control regmap offsets */
-#define OXNAS_DWMAC_CTRL_REGOFFSET 0x78
-#define OXNAS_DWMAC_DELAY_REGOFFSET 0x100
-
-/* Control Register */
-#define DWMAC_CKEN_RX_IN 14
-#define DWMAC_CKEN_RXN_OUT 13
-#define DWMAC_CKEN_RX_OUT 12
-#define DWMAC_CKEN_TX_IN 10
-#define DWMAC_CKEN_TXN_OUT 9
-#define DWMAC_CKEN_TX_OUT 8
-#define DWMAC_RX_SOURCE 7
-#define DWMAC_TX_SOURCE 6
-#define DWMAC_LOW_TX_SOURCE 4
-#define DWMAC_AUTO_TX_SOURCE 3
-#define DWMAC_RGMII 2
-#define DWMAC_SIMPLE_MUX 1
-#define DWMAC_CKEN_GTX 0
-
-/* Delay register */
-#define DWMAC_TX_VARDELAY_SHIFT 0
-#define DWMAC_TXN_VARDELAY_SHIFT 8
-#define DWMAC_RX_VARDELAY_SHIFT 16
-#define DWMAC_RXN_VARDELAY_SHIFT 24
-#define DWMAC_TX_VARDELAY(d) ((d) << DWMAC_TX_VARDELAY_SHIFT)
-#define DWMAC_TXN_VARDELAY(d) ((d) << DWMAC_TXN_VARDELAY_SHIFT)
-#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
-#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
-
-struct oxnas_dwmac;
-
-struct oxnas_dwmac_data {
- int (*setup)(struct oxnas_dwmac *dwmac);
-};
-
-struct oxnas_dwmac {
- struct device *dev;
- struct clk *clk;
- struct regmap *regmap;
- const struct oxnas_dwmac_data *data;
-};
-
-static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac)
-{
- unsigned int value;
- int ret;
-
- ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
- if (ret < 0)
- return ret;
-
- /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
- value |= BIT(DWMAC_CKEN_GTX) |
- /* Use simple mux for 25/125 Mhz clock switching */
- BIT(DWMAC_SIMPLE_MUX);
-
- regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
-
- return 0;
-}
-
-static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac)
-{
- unsigned int value;
- int ret;
-
- ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
- if (ret < 0)
- return ret;
-
- /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
- value |= BIT(DWMAC_CKEN_GTX) |
- /* Use simple mux for 25/125 Mhz clock switching */
- BIT(DWMAC_SIMPLE_MUX) |
- /* set auto switch tx clock source */
- BIT(DWMAC_AUTO_TX_SOURCE) |
- /* enable tx & rx vardelay */
- BIT(DWMAC_CKEN_TX_OUT) |
- BIT(DWMAC_CKEN_TXN_OUT) |
- BIT(DWMAC_CKEN_TX_IN) |
- BIT(DWMAC_CKEN_RX_OUT) |
- BIT(DWMAC_CKEN_RXN_OUT) |
- BIT(DWMAC_CKEN_RX_IN);
- regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
-
- /* set tx & rx vardelay */
- value = DWMAC_TX_VARDELAY(4) |
- DWMAC_TXN_VARDELAY(2) |
- DWMAC_RX_VARDELAY(10) |
- DWMAC_RXN_VARDELAY(8);
- regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value);
-
- return 0;
-}
-
-static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
-{
- struct oxnas_dwmac *dwmac = priv;
- int ret;
-
- /* Reset HW here before changing the glue configuration */
- ret = device_reset(dwmac->dev);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
-
- ret = dwmac->data->setup(dwmac);
- if (ret)
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
-{
- struct oxnas_dwmac *dwmac = priv;
-
- clk_disable_unprepare(dwmac->clk);
-}
-
-static int oxnas_dwmac_probe(struct platform_device *pdev)
-{
- struct plat_stmmacenet_data *plat_dat;
- struct stmmac_resources stmmac_res;
- struct oxnas_dwmac *dwmac;
- int ret;
-
- ret = stmmac_get_platform_resources(pdev, &stmmac_res);
- if (ret)
- return ret;
-
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
- if (IS_ERR(plat_dat))
- return PTR_ERR(plat_dat);
-
- dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
-
- dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev);
- if (!dwmac->data) {
- ret = -EINVAL;
- goto err_remove_config_dt;
- }
-
- dwmac->dev = &pdev->dev;
- plat_dat->bsp_priv = dwmac;
- plat_dat->init = oxnas_dwmac_init;
- plat_dat->exit = oxnas_dwmac_exit;
-
- dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "oxsemi,sys-ctrl");
- if (IS_ERR(dwmac->regmap)) {
- dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
- ret = PTR_ERR(dwmac->regmap);
- goto err_remove_config_dt;
- }
-
- dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
- if (IS_ERR(dwmac->clk)) {
- ret = PTR_ERR(dwmac->clk);
- goto err_remove_config_dt;
- }
-
- ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- goto err_remove_config_dt;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_dwmac_exit;
-
-
- return 0;
-
-err_dwmac_exit:
- oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
-}
-
-static const struct oxnas_dwmac_data ox810se_dwmac_data = {
- .setup = oxnas_dwmac_setup_ox810se,
-};
-
-static const struct oxnas_dwmac_data ox820_dwmac_data = {
- .setup = oxnas_dwmac_setup_ox820,
-};
-
-static const struct of_device_id oxnas_dwmac_match[] = {
- {
- .compatible = "oxsemi,ox810se-dwmac",
- .data = &ox810se_dwmac_data,
- },
- {
- .compatible = "oxsemi,ox820-dwmac",
- .data = &ox820_dwmac_data,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
-
-static struct platform_driver oxnas_dwmac_driver = {
- .probe = oxnas_dwmac_probe,
- .remove_new = stmmac_pltfr_remove,
- .driver = {
- .name = "oxnas-dwmac",
- .pm = &stmmac_pltfr_pm_ops,
- .of_match_table = oxnas_dwmac_match,
- },
-};
-module_platform_driver(oxnas_dwmac_driver);
-
-MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
-MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e62940414e54..d3bf42d0fceb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -3,11 +3,10 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
-#include <linux/property.h>
#include "stmmac.h"
#include "stmmac_platform.h"
@@ -104,7 +103,7 @@ struct qcom_ethqos {
struct clk *link_clk;
struct phy *serdes_phy;
unsigned int speed;
- int phy_mode;
+ phy_interface_t phy_mode;
const struct ethqos_emac_por *por;
unsigned int num_por;
@@ -631,7 +630,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
return ethqos->configure_func(ethqos);
}
-static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
+static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct qcom_ethqos *ethqos = priv;
@@ -694,6 +693,23 @@ static void ethqos_clks_disable(void *data)
ethqos_clks_config(data, false);
}
+static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv)
+{
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
+ int err;
+
+ if (!plat_dat->clk_ptp_ref)
+ return;
+
+ /* Max the PTP ref clock out to get the best resolution possible */
+ err = clk_set_rate(plat_dat->clk_ptp_ref, ULONG_MAX);
+ if (err)
+ netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
+ plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);
+
+ netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate);
+}
+
static int qcom_ethqos_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -706,12 +722,13 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
- dev_err(dev, "dt configuration failed\n");
- return PTR_ERR(plat_dat);
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
}
plat_dat->clks_config = ethqos_clks_config;
@@ -720,7 +737,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ethqos->phy_mode = device_get_phy_mode(dev);
+ ret = of_get_phy_mode(np, &ethqos->phy_mode);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get phy mode\n");
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -731,16 +750,17 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
- case -ENODEV:
- return -ENODEV;
default:
+ dev_err(dev, "Unsupported phy mode %s\n",
+ phy_modes(ethqos->phy_mode));
return -EINVAL;
}
ethqos->pdev = pdev;
ethqos->rgmii_base = devm_platform_ioremap_resource_byname(pdev, "rgmii");
if (IS_ERR(ethqos->rgmii_base))
- return PTR_ERR(ethqos->rgmii_base);
+ return dev_err_probe(dev, PTR_ERR(ethqos->rgmii_base),
+ "Failed to map rgmii resource\n");
ethqos->mac_base = stmmac_res.addr;
@@ -752,7 +772,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
if (IS_ERR(ethqos->link_clk))
- return PTR_ERR(ethqos->link_clk);
+ return dev_err_probe(dev, PTR_ERR(ethqos->link_clk),
+ "Failed to get link_clk\n");
ret = ethqos_clks_config(ethqos, true);
if (ret)
@@ -764,7 +785,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->serdes_phy = devm_phy_optional_get(dev, "serdes");
if (IS_ERR(ethqos->serdes_phy))
- return PTR_ERR(ethqos->serdes_phy);
+ return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy),
+ "Failed to get serdes phy\n");
ethqos->speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
@@ -773,14 +795,17 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->bsp_priv = ethqos;
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
plat_dat->dump_debug_regs = rgmii_dump;
+ plat_dat->ptp_clk_freq_config = ethqos_ptp_clk_freq_config;
plat_dat->has_gmac4 = 1;
if (ethqos->has_emac_ge_3)
plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
plat_dat->pmt = 1;
- plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_property_read_bool(np, "snps,tso"))
+ plat_dat->flags |= STMMAC_FLAG_TSO_EN;
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
- plat_dat->rx_clk_runs_in_lpi = 1;
- plat_dat->has_integrated_pcs = data->has_integrated_pcs;
+ plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
+ if (data->has_integrated_pcs)
+ plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
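
Note: the qcom-ethqos probe path above switches its error returns to dev_err_probe(), which logs the failure (or records a deferral reason for -EPROBE_DEFER) and passes the error code through, collapsing the usual log-then-return pattern into one statement. A minimal sketch, assuming only a devm clock lookup; the clock name is made up:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_link_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "example-link");	/* name is illustrative */

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get link clock\n");

	*out = clk;
	return 0;
}
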
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index d81591b470a2..d920a50dd16c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -14,8 +14,8 @@
#include <linux/of_net.h>
#include <linux/gpio.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
@@ -1785,7 +1785,7 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
gmac_clk_enable(gmac, false);
}
-static void rk_fix_speed(void *priv, unsigned int speed)
+static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct rk_priv_data *bsp_priv = priv;
struct device *dev = &bsp_priv->pdev->dev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 6267bcb60206..9bf102bbc6a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -61,7 +61,7 @@ struct socfpga_dwmac {
struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
@@ -236,7 +236,7 @@ static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
struct net_device *ndev = dev_get_drvdata(dwmac->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- return priv->plat->interface;
+ return priv->plat->mac_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index d3a39d2fb3a9..9289bb87c3e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -7,8 +7,10 @@
*
*/
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include "stmmac_platform.h"
@@ -22,7 +24,7 @@ struct starfive_dwmac {
struct clk *clk_tx;
};
-static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct starfive_dwmac *dwmac = priv;
unsigned long rate;
@@ -58,7 +60,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
unsigned int mode;
int err;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
mode = STARFIVE_DWMAC_PHY_INFT_RMII;
break;
@@ -70,7 +72,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
default:
dev_err(dwmac->dev, "unsupported interface %d\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index dcbb17c4f07a..0d653bbb931b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -17,7 +17,6 @@
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include "stmmac_platform.h"
@@ -104,11 +103,11 @@ struct sti_dwmac {
struct regmap *regmap;
bool gmac_en;
u32 speed;
- void (*fix_retime_src)(void *priv, unsigned int speed);
+ void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
- void (*fix_retime_src)(void *priv, unsigned int speed);
+ void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
};
static u32 phy_intf_sels[] = {
@@ -136,7 +135,7 @@ static u32 stih4xx_tx_retime_val[] = {
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
-static void stih4xx_fix_retime_src(void *priv, u32 spd)
+static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
@@ -188,7 +187,7 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
- dwmac->fix_retime_src(dwmac, dwmac->speed);
+ dwmac->fix_retime_src(dwmac, dwmac->speed, 0);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index bdb4de59a672..26ea8c687881 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -11,7 +11,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -172,7 +171,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
clk_rate = clk_get_rate(dwmac->clk_eth_ck);
dwmac->enable_eth_ck = false;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
dwmac->enable_eth_ck = true;
@@ -211,7 +210,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
/* Do not manage others interfaces */
return -EINVAL;
}
@@ -231,7 +230,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
@@ -242,7 +241,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
/* Do not manage others interfaces */
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 1e714380d125..01e77368eef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -11,9 +11,10 @@
#include <linux/mdio-mux.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -440,8 +441,10 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- u32 v;
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int ret = 0;
+ u32 v;
v = readl(ioaddr + EMAC_INT_STA);
@@ -452,7 +455,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- x->tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -474,7 +479,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- x->rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
@@ -1009,7 +1016,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (plat->interface) {
+ switch (plat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -1024,7 +1031,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
break;
default:
dev_err(dev, "Unsupported interface mode: %s",
- phy_modes(plat->interface));
+ phy_modes(plat->mac_interface));
return -EINVAL;
}
@@ -1224,10 +1231,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->interface = interface;
+ plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
- plat_dat->has_sun8i = true;
+ plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
plat_dat->bsp_priv = gmac;
plat_dat->init = sun8i_dwmac_init;
plat_dat->exit = sun8i_dwmac_exit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 50963e91c347..beceeae579bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -72,7 +72,7 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, unsigned int speed)
+static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct sunxi_priv_data *gmac = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index fbb0ccf84afc..e0f3cbd36852 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/platform_device.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <linux/stmmac.h>
#include <linux/clk.h>
@@ -291,7 +291,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
}
plat->has_xgmac = 1;
- plat->tso_en = 1;
+ plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
plat->bsp_priv = mgbe;
@@ -338,7 +338,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
/* Program SID */
writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
- plat->serdes_up_after_phy_linkup = 1;
+ plat->flags |= STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP;
err = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (err < 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index acbb284be174..22d113fb8e09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -6,7 +6,8 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/stmmac.h>
@@ -53,7 +54,7 @@ struct visconti_eth {
spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
+static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct visconti_eth *dwmac = priv;
struct net_device *netdev = dev_get_drvdata(dwmac->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 1c32b1788f02..dea270f60cc3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -82,29 +82,24 @@ static void dwmac100_dump_dma_regs(struct stmmac_priv *priv,
}
/* DMA controller has two counters to track the number of the missed frames. */
-static void dwmac100_dma_diagnostic_fr(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static void dwmac100_dma_diagnostic_fr(struct stmmac_extra_stats *x,
void __iomem *ioaddr)
{
u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
if (unlikely(csr8)) {
if (csr8 & DMA_MISSED_FRAME_OVE) {
- stats->rx_over_errors += 0x800;
x->rx_overflow_cntr += 0x800;
} else {
unsigned int ove_cntr;
ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
- stats->rx_over_errors += ove_cntr;
x->rx_overflow_cntr += ove_cntr;
}
if (csr8 & DMA_MISSED_FRAME_OVE_M) {
- stats->rx_missed_errors += 0xffff;
x->rx_missed_cntr += 0xffff;
} else {
unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
- stats->rx_missed_errors += miss_f;
x->rx_missed_cntr += miss_f;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 03b1c5a97826..c6ff1fa0e04d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -68,6 +68,11 @@ static void dwmac4_core_init(struct mac_device_info *hw,
init_waitqueue_head(&priv->tstamp_busy_wait);
}
+static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
+{
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+}
+
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
u8 mode, u32 queue)
{
@@ -1131,6 +1136,7 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
+ .phylink_get_caps = dwmac4_phylink_get_caps,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1173,6 +1179,7 @@ const struct stmmac_ops dwmac4_ops = {
const struct stmmac_ops dwmac410_ops = {
.core_init = dwmac4_core_init,
+ .phylink_get_caps = dwmac4_phylink_get_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1221,6 +1228,7 @@ const struct stmmac_ops dwmac410_ops = {
const struct stmmac_ops dwmac510_ops = {
.core_init = dwmac4_core_init,
+ .phylink_get_caps = dwmac4_phylink_get_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
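
Note: dwmac4_phylink_get_caps() above only ORs MAC_2500FD into the capabilities phylink already derived, advertising 2.5G full duplex on GMAC4 cores. A minimal sketch of the same idea against a phylink_config, hedged since the surrounding stmmac wiring is not shown here:

#include <linux/phylink.h>

struct example_priv {
	struct phylink_config phylink_config;
};

/* Hypothetical capability hook: extend, never replace, the base set. */
static void example_get_caps(struct example_priv *priv)
{
	priv->phylink_config.mac_capabilities |= MAC_2500FD;
}
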
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 6a011d8633e8..89a14084c611 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -13,8 +13,7 @@
#include "dwmac4.h"
#include "dwmac4_descs.h"
-static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p,
void __iomem *ioaddr)
{
@@ -40,15 +39,13 @@ static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
x->tx_frame_flushed++;
if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
x->tx_losscarrier++;
- stats->tx_carrier_errors++;
}
if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
x->tx_carrier++;
- stats->tx_carrier_errors++;
}
if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
(tdes3 & TDES3_EXCESSIVE_COLLISION)))
- stats->collisions +=
+ x->tx_collision +=
(tdes3 & TDES3_COLLISION_COUNT_MASK)
>> TDES3_COLLISION_COUNT_SHIFT;
@@ -73,8 +70,7 @@ static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
return ret;
}
-static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes1 = le32_to_cpu(p->des1);
@@ -93,7 +89,7 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
if (unlikely(rdes3 & RDES3_GIANT_PACKET))
- stats->rx_length_errors++;
+ x->rx_length++;
if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
x->rx_gmac_overflow++;
@@ -103,10 +99,8 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
x->rx_mii++;
- if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+ if (unlikely(rdes3 & RDES3_CRC_ERROR))
x->rx_crc_errors++;
- stats->rx_crc_errors++;
- }
if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
x->dribbling_bit++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 03ceb6a94073..980e5f8a37ec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,6 +171,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -198,18 +200,19 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
}
/* TX/RX NORMAL interrupts */
- if (likely(intr_status & DMA_CHAN_STATUS_NIS))
- x->normal_irq_n++;
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- x->rx_normal_irq_n++;
- x->rxq_stats[chan].rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- x->tx_normal_irq_n++;
- x->txq_stats[chan].tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
ret |= handle_tx;
}
+
if (unlikely(intr_status & DMA_CHAN_STATUS_TBU))
ret |= handle_tx;
if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 0b6f999a8305..aaa09b16b016 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -10,6 +10,7 @@
#include <linux/iopoll.h>
#include "common.h"
#include "dwmac_dma.h"
+#include "stmmac.h"
#define GMAC_HI_REG_AE 0x80000000
@@ -161,6 +162,8 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -208,17 +211,20 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_STATUS_NIS)) {
- x->normal_irq_n++;
if (likely(intr_status & DMA_STATUS_RI)) {
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- x->rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- x->tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 1913385df685..7a8f47e7b728 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -74,8 +74,20 @@
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
#define XGMAC_RXQ_CTRL1 0x000000a4
+#define XGMAC_AVCPQ GENMASK(31, 28)
+#define XGMAC_AVCPQ_SHIFT 28
+#define XGMAC_PTPQ GENMASK(27, 24)
+#define XGMAC_PTPQ_SHIFT 24
+#define XGMAC_TACPQE BIT(23)
+#define XGMAC_DCBCPQ GENMASK(19, 16)
+#define XGMAC_DCBCPQ_SHIFT 16
+#define XGMAC_MCBCQEN BIT(15)
+#define XGMAC_MCBCQ GENMASK(11, 8)
+#define XGMAC_MCBCQ_SHIFT 8
#define XGMAC_RQ GENMASK(7, 4)
#define XGMAC_RQ_SHIFT 4
+#define XGMAC_UPQ GENMASK(3, 0)
+#define XGMAC_UPQ_SHIFT 0
#define XGMAC_RXQ_CTRL2 0x000000a8
#define XGMAC_RXQ_CTRL3 0x000000ac
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
@@ -110,7 +122,12 @@
#define XGMAC_TLPIEN BIT(0)
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
+#define XGMAC_HWFEAT_EDMA BIT(31)
+#define XGMAC_HWFEAT_EDIFFC BIT(30)
+#define XGMAC_HWFEAT_VXN BIT(29)
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
+#define XGMAC_HWFEAT_TSSTSSEL GENMASK(26, 25)
+#define XGMAC_HWFEAT_ADDMACADRSEL GENMASK(22, 18)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
#define XGMAC_HWFEAT_EEESEL BIT(13)
@@ -121,34 +138,54 @@
#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_SMASEL BIT(5)
#define XGMAC_HWFEAT_VLHASH BIT(4)
+#define XGMAC_HWFEAT_HDSEL BIT(3)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27)
#define XGMAC_HWFEAT_HASHTBLSZ GENMASK(25, 24)
+#define XGMAC_HWFEAT_NUMTC GENMASK(23, 21)
#define XGMAC_HWFEAT_RSSEN BIT(20)
+#define XGMAC_HWFEAT_DBGMEMA BIT(19)
#define XGMAC_HWFEAT_TSOEN BIT(18)
#define XGMAC_HWFEAT_SPHEN BIT(17)
+#define XGMAC_HWFEAT_DCBEN BIT(16)
#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
+#define XGMAC_HWFEAT_ADVTHWORD BIT(13)
+#define XGMAC_HWFEAT_PTOEN BIT(12)
+#define XGMAC_HWFEAT_OSTEN BIT(11)
#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
+#define XGMAC_HWFEAT_PFCEN BIT(5)
#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
#define XGMAC_HW_FEATURE2 0x00000124
+#define XGMAC_HWFEAT_AUXSNAPNUM GENMASK(30, 28)
#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24)
#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18)
#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_TBSCH GENMASK(31, 28)
#define XGMAC_HWFEAT_TBSSEL BIT(27)
#define XGMAC_HWFEAT_FPESEL BIT(26)
+#define XGMAC_HWFEAT_SGFSEL BIT(25)
#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23)
#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20)
#define XGMAC_HWFEAT_ESTSEL BIT(19)
+#define XGMAC_HWFEAT_TTSFD GENMASK(18, 16)
#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
#define XGMAC_HWFEAT_DVLAN BIT(13)
#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9)
+#define XGMAC_HWFEAT_POUOST BIT(8)
+#define XGMAC_HWFEAT_FRPPIPE GENMASK(7, 5)
+#define XGMAC_HWFEAT_CBTISEL BIT(4)
#define XGMAC_HWFEAT_FRPSEL BIT(3)
+#define XGMAC_HWFEAT_NRVF GENMASK(2, 0)
+#define XGMAC_HW_FEATURE4 0x0000012c
+#define XGMAC_HWFEAT_EASP BIT(4)
+#define XGMAC_HWFEAT_PCSEL GENMASK(1, 0)
#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150
#define XGMAC_MAC_FSM_CONTROL 0x00000158
#define XGMAC_PRTYEN BIT(1)
@@ -165,7 +202,7 @@
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_L3L4_ADDR_CTRL 0x00000c00
-#define XGMAC_IDDR GENMASK(15, 8)
+#define XGMAC_IDDR GENMASK(16, 8)
#define XGMAC_IDDR_SHIFT 8
#define XGMAC_IDDR_FNUM 4
#define XGMAC_TT BIT(1)
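
The HW_FEATURE bit definitions added above are plain GENMASK()/BIT() fields; they can be decoded with open-coded shifts (as dwxgmac2_get_hw_feature() does later in this series) or with FIELD_GET(). An illustrative helper, assuming only the macros defined in this header:

#include <linux/bitfield.h>
#include <linux/io.h>

static u32 example_read_ts_source(void __iomem *ioaddr)
{
	u32 hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);

	/* XGMAC_HWFEAT_TSSTSSEL is GENMASK(26, 25):
	 * 0 = None, 1 = Internal, 2 = External, 3 = Both.
	 */
	return FIELD_GET(XGMAC_HWFEAT_TSSTSSEL, hw_cap);
}
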
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index a0c2ef8bb0ac..f352be269deb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -47,6 +47,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
+static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
+{
+ priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
+}
+
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -127,6 +135,36 @@ static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
writel(value, ioaddr + reg);
}
+static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
+ u8 packet, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
+ { XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
+ { XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
+ { XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
+ { XGMAC_UPQ, XGMAC_UPQ_SHIFT },
+ { XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
+ };
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL1);
+
+ /* routing configuration */
+ value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask;
+ value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) &
+ dwxgmac2_route_possibilities[packet - 1].reg_mask;
+
+ /* some packets require extra ops */
+ if (packet == PACKET_AVCPQ)
+ value |= FIELD_PREP(XGMAC_TACPQE, 1);
+ else if (packet == PACKET_MCBCQ)
+ value |= FIELD_PREP(XGMAC_MCBCQEN, 1);
+
+ writel(value, ioaddr + XGMAC_RXQ_CTRL1);
+}
+
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
u32 rx_alg)
{
@@ -831,8 +869,10 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
- /* Only ECC Protection for External Memory feature is selected */
- if (asp <= 0x1)
+ /* 0x2: Without ECC or Parity Ports on External Application Interface
+ * 0x4: Only ECC Protection for External Memory feature is selected
+ */
+ if (asp == 0x2 || asp == 0x4)
return 0;
/* 4. Enable Parity and Timeout for FSM */
@@ -1458,12 +1498,13 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
+ .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio,
.tx_queue_prio = dwxgmac2_tx_queue_prio,
- .rx_queue_routing = NULL,
+ .rx_queue_routing = dwxgmac2_rx_queue_routing,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
@@ -1519,12 +1560,13 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
+ .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio,
.tx_queue_prio = dwxgmac2_tx_queue_prio,
- .rx_queue_routing = NULL,
+ .rx_queue_routing = dwxgmac2_rx_queue_routing,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
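
A hedged usage sketch of the newly wired-up rx_queue_routing callback (not from the patch). It assumes the generic PACKET_* routing enum starts at PACKET_AVCPQ == 1, which is what the "packet - 1" indexing above relies on:

static void example_route_ptp_to_queue1(struct mac_device_info *hw)
{
	/* Sets RXQ_CTRL1.PTPQ = 1; AVCPQ routing would additionally set
	 * TACPQE, and MCBCQ routing would additionally set MCBCQEN.
	 */
	dwxgmac2_rx_queue_routing(hw, PACKET_PTPQ, 1);
}
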
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 13c347ee8be9..fc82862a612c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -8,8 +8,7 @@
#include "common.h"
#include "dwxgmac2.h"
-static int dwxgmac2_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwxgmac2_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
unsigned int tdes3 = le32_to_cpu(p->des3);
@@ -23,8 +22,7 @@ static int dwxgmac2_get_tx_status(struct net_device_stats *stats,
return ret;
}
-static int dwxgmac2_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int dwxgmac2_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes3 = le32_to_cpu(p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 070bd912580b..fa69d64a8694 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,6 +337,8 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -364,16 +366,16 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
- x->normal_irq_n++;
-
if (likely(intr_status & XGMAC_RI)) {
- x->rx_normal_irq_n++;
- x->rxq_stats[chan].rx_normal_irq_n++;
+ u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_normal_irq_n++;
+ u64_stats_update_end(&rx_q->rxq_stats.syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- x->tx_normal_irq_n++;
- x->txq_stats[chan].tx_normal_irq_n++;
+ u64_stats_update_begin(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_normal_irq_n++;
+ u64_stats_update_end(&tx_q->txq_stats.syncp);
ret |= handle_tx;
}
}
@@ -389,9 +391,14 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
{
u32 hw_cap;
- /* MAC HW feature 0 */
+ /* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
+ dma_cap->ediffc = (hw_cap & XGMAC_HWFEAT_EDIFFC) >> 30;
+ dma_cap->vxn = (hw_cap & XGMAC_HWFEAT_VXN) >> 29;
dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
+ dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25;
+ dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
@@ -402,16 +409,31 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->sma_mdio = (hw_cap & XGMAC_HWFEAT_SMASEL) >> 5;
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
+ dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
+ /* If L3L4FNUM < 8, the number of L3L4 filters supported by
+ * XGMAC equals L3L4FNUM. From L3L4FNUM >= 8 onwards the filter
+ * count doubles: 8, 16, 32, ... The current maximum encoding is
+ * L3L4FNUM = 10.
+ */
+ if (dma_cap->l3l4fnum >= 8 && dma_cap->l3l4fnum <= 10)
+ dma_cap->l3l4fnum = 8 << (dma_cap->l3l4fnum - 8);
+ else if (dma_cap->l3l4fnum > 10)
+ dma_cap->l3l4fnum = 32;
+
dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
+ dma_cap->numtc = ((hw_cap & XGMAC_HWFEAT_NUMTC) >> 21) + 1;
dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
+ dma_cap->dbgmem = (hw_cap & XGMAC_HWFEAT_DBGMEMA) >> 19;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
+ dma_cap->dcben = (hw_cap & XGMAC_HWFEAT_DCBEN) >> 16;
dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
switch (dma_cap->addr64) {
@@ -429,13 +451,18 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
break;
}
+ dma_cap->advthword = (hw_cap & XGMAC_HWFEAT_ADVTHWORD) >> 13;
+ dma_cap->ptoen = (hw_cap & XGMAC_HWFEAT_PTOEN) >> 12;
+ dma_cap->osten = (hw_cap & XGMAC_HWFEAT_OSTEN) >> 11;
dma_cap->tx_fifo_size =
128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
+ dma_cap->pfcen = (hw_cap & XGMAC_HWFEAT_PFCEN) >> 5;
dma_cap->rx_fifo_size =
128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);
/* MAC HW feature 2 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
+ dma_cap->aux_snapshot_n = (hw_cap & XGMAC_HWFEAT_AUXSNAPNUM) >> 28;
dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
dma_cap->number_tx_channel =
((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
@@ -448,16 +475,28 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 3 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->tbs_ch_num = ((hw_cap & XGMAC_HWFEAT_TBSCH) >> 28) + 1;
dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
+ dma_cap->sgfsel = (hw_cap & XGMAC_HWFEAT_SGFSEL) >> 25;
dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
+ dma_cap->ttsfd = (hw_cap & XGMAC_HWFEAT_TTSFD) >> 16;
dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
+ dma_cap->pou_ost_en = (hw_cap & XGMAC_HWFEAT_POUOST) >> 8;
+ dma_cap->frppipe_num = ((hw_cap & XGMAC_HWFEAT_FRPPIPE) >> 5) + 1;
+ dma_cap->cbtisel = (hw_cap & XGMAC_HWFEAT_CBTISEL) >> 4;
dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
+ dma_cap->nrvf_num = (hw_cap & XGMAC_HWFEAT_NRVF) >> 0;
+
+ /* MAC HW feature 4 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE4);
+ dma_cap->asp |= (hw_cap & XGMAC_HWFEAT_EASP) >> 2;
+ dma_cap->pcsel = (hw_cap & XGMAC_HWFEAT_PCSEL) >> 0;
return 0;
}
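
The L3L4FNUM decoding introduced above maps the raw 4-bit field onto a filter count. A small mirror of that logic with the worked values spelled out (illustrative only):

static u32 example_l3l4_filter_count(u32 raw)
{
	if (raw < 8)
		return raw;		/* 0..7: encoded directly */
	if (raw <= 10)
		return 8 << (raw - 8);	/* 8 -> 8, 9 -> 16, 10 -> 32 */
	return 32;			/* >10 is reserved; clamp like the driver does */
}
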
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index a91d8f13a931..937b7a0466fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -12,8 +12,7 @@
#include "common.h"
#include "descs_com.h"
-static int enh_desc_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int enh_desc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
unsigned int tdes0 = le32_to_cpu(p->des0);
@@ -38,15 +37,13 @@ static int enh_desc_get_tx_status(struct net_device_stats *stats,
if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
- stats->tx_carrier_errors++;
}
if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
x->tx_carrier++;
- stats->tx_carrier_errors++;
}
if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
(tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
- stats->collisions +=
+ x->tx_collision +=
(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;
if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
@@ -117,8 +114,7 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static void enh_desc_get_ext_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static void enh_desc_get_ext_status(struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
unsigned int rdes0 = le32_to_cpu(p->basic.des0);
@@ -182,8 +178,7 @@ static void enh_desc_get_ext_status(struct net_device_stats *stats,
}
}
-static int enh_desc_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int enh_desc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes0 = le32_to_cpu(p->des0);
@@ -193,14 +188,14 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- stats->rx_length_errors++;
+ x->rx_length++;
return discard_frame;
}
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
- stats->rx_length_errors++;
+ x->rx_length++;
}
if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
x->rx_gmac_overflow++;
@@ -209,7 +204,7 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats,
pr_err("\tIPC Csum Error/Giant frame\n");
if (unlikely(rdes0 & RDES0_COLLISION))
- stats->collisions++;
+ x->rx_collision++;
if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
x->rx_watchdog++;
@@ -218,7 +213,6 @@ static int enh_desc_get_rx_status(struct net_device_stats *stats,
if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc_errors++;
- stats->rx_crc_errors++;
}
ret = discard_frame;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 6ee7cf07cfd7..b95d3e137813 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -57,8 +57,7 @@ struct stmmac_desc_ops {
/* Last tx segment reports the transmit status */
int (*get_tx_ls)(struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
- int (*tx_status)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ int (*tx_status)(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr);
/* Get the buffer size from the descriptor */
int (*get_tx_len)(struct dma_desc *p);
@@ -67,11 +66,9 @@ struct stmmac_desc_ops {
/* Get the receive frame size */
int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
/* Return the reception status looking at the RDES1 */
- int (*rx_status)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ int (*rx_status)(struct stmmac_extra_stats *x,
struct dma_desc *p);
- void (*rx_extended_status)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ void (*rx_extended_status)(struct stmmac_extra_stats *x,
struct dma_extended_desc *p);
/* Set tx timestamp enable bit */
void (*enable_tx_timestamp) (struct dma_desc *p);
@@ -191,8 +188,7 @@ struct stmmac_dma_ops {
void (*dma_tx_mode)(struct stmmac_priv *priv, void __iomem *ioaddr,
int mode, u32 channel, int fifosz, u8 qmode);
/* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr)(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+ void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission) (void __iomem *ioaddr);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -304,6 +300,8 @@ struct stmmac_est;
struct stmmac_ops {
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
+ /* Get phylink capabilities */
+ void (*phylink_get_caps)(struct stmmac_priv *priv);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
@@ -423,6 +421,8 @@ struct stmmac_ops {
#define stmmac_core_init(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, core_init, __args)
+#define stmmac_mac_phylink_get_caps(__priv) \
+ stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv)
#define stmmac_mac_set(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac, __args)
#define stmmac_rx_ipc(__priv, __args...) \
@@ -536,6 +536,7 @@ struct stmmac_hwtimestamp {
void (*get_systime) (void __iomem *ioaddr, u64 *systime);
void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time);
void (*timestamp_interrupt)(struct stmmac_priv *priv);
+ void (*hwtstamp_correct_latency)(struct stmmac_priv *priv);
};
#define stmmac_config_hw_tstamping(__priv, __args...) \
@@ -554,6 +555,8 @@ struct stmmac_hwtimestamp {
stmmac_do_void_callback(__priv, ptp, get_ptptime, __args)
#define stmmac_timestamp_interrupt(__priv, __args...) \
stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args)
+#define stmmac_hwtstamp_correct_latency(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, hwtstamp_correct_latency, __args)
struct stmmac_tx_queue;
struct stmmac_rx_queue;
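
Both new ops are reached through the existing hwif dispatch macros, so platforms whose ops table leaves them NULL are unaffected: stmmac_do_void_callback() only invokes a callback that is actually present. Illustrative call sites (the callers live in stmmac_main.c):

	stmmac_mac_phylink_get_caps(priv);		/* MAC-specific phylink capabilities */
	stmmac_hwtstamp_correct_latency(priv, priv);	/* PTP ingress/egress latency fixup */
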
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 350e6670a576..68a7cfcb1d8f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -12,8 +12,7 @@
#include "common.h"
#include "descs_com.h"
-static int ndesc_get_tx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int ndesc_get_tx_status(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
unsigned int tdes0 = le32_to_cpu(p->des0);
@@ -31,15 +30,12 @@ static int ndesc_get_tx_status(struct net_device_stats *stats,
if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
x->tx_underflow++;
- stats->tx_fifo_errors++;
}
if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
x->tx_carrier++;
- stats->tx_carrier_errors++;
}
if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
- stats->tx_carrier_errors++;
}
if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
(tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
@@ -47,7 +43,7 @@ static int ndesc_get_tx_status(struct net_device_stats *stats,
unsigned int collisions;
collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
- stats->collisions += collisions;
+ x->tx_collision += collisions;
}
ret = tx_err;
}
@@ -70,8 +66,7 @@ static int ndesc_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns good_frame because the GMAC device
* is supposed to be able to compute the csum in HW. */
-static int ndesc_get_rx_status(struct net_device_stats *stats,
- struct stmmac_extra_stats *x,
+static int ndesc_get_rx_status(struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = good_frame;
@@ -81,7 +76,7 @@ static int ndesc_get_rx_status(struct net_device_stats *stats,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- stats->rx_length_errors++;
+ x->rx_length++;
return discard_frame;
}
@@ -96,11 +91,9 @@ static int ndesc_get_rx_status(struct net_device_stats *stats,
x->ipc_csum_error++;
if (unlikely(rdes0 & RDES0_COLLISION)) {
x->rx_collision++;
- stats->collisions++;
}
if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc_errors++;
- stats->rx_crc_errors++;
}
ret = discard_frame;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 07ea5ab0a60b..3401e888a9f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -21,7 +21,8 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/reset.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
+#include <net/xdp.h>
#include <uapi/linux/bpf.h>
struct stmmac_resources {
@@ -77,6 +78,7 @@ struct stmmac_tx_queue {
dma_addr_t dma_tx_phy;
dma_addr_t tx_tail_addr;
u32 mss;
+ struct stmmac_txq_stats txq_stats;
};
struct stmmac_rx_buffer {
@@ -121,6 +123,7 @@ struct stmmac_rx_queue {
unsigned int len;
unsigned int error;
} state;
+ struct stmmac_rxq_stats rxq_stats;
};
struct stmmac_channel {
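
The queue structures now embed their own counters; the struct u64_stats_sync inside stmmac_txq_stats/stmmac_rxq_stats must be initialised once before the first writer runs, typically at probe time. A hedged sketch of that initialisation (queue limits and field names as used elsewhere in the driver):

	for (queue = 0; queue < MTL_MAX_TX_QUEUES; queue++)
		u64_stats_init(&priv->dma_conf.tx_queue[queue].txq_stats.syncp);
	for (queue = 0; queue < MTL_MAX_RX_QUEUES; queue++)
		u64_stats_init(&priv->dma_conf.rx_queue[queue].rxq_stats.syncp);
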
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 2ae73ab842d4..b7ac7abecdd3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -89,14 +89,6 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
/* Tx/Rx IRQ Events */
STMMAC_STAT(rx_early_irq),
STMMAC_STAT(threshold),
- STMMAC_STAT(tx_pkt_n),
- STMMAC_STAT(rx_pkt_n),
- STMMAC_STAT(normal_irq_n),
- STMMAC_STAT(rx_normal_irq_n),
- STMMAC_STAT(napi_poll),
- STMMAC_STAT(tx_normal_irq_n),
- STMMAC_STAT(tx_clean),
- STMMAC_STAT(tx_set_ic_bit),
STMMAC_STAT(irq_receive_pmt_irq_n),
/* MMC info */
STMMAC_STAT(mmc_tx_irq_n),
@@ -163,9 +155,6 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(mtl_rx_fifo_ctrl_active),
STMMAC_STAT(mac_rx_frame_ctrl_fifo),
STMMAC_STAT(mac_gmii_rx_proto_engine),
- /* TSO */
- STMMAC_STAT(tx_tso_frames),
- STMMAC_STAT(tx_tso_nfrags),
/* EST */
STMMAC_STAT(mtl_est_cgce),
STMMAC_STAT(mtl_est_hlbs),
@@ -175,6 +164,23 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+/* Statistics collected per queue; they are summed over all TX or RX
+ * queues, or over both TX and RX queues (napi_poll, normal_irq_n).
+ */
+static const char stmmac_qstats_string[][ETH_GSTRING_LEN] = {
+ "rx_pkt_n",
+ "rx_normal_irq_n",
+ "tx_pkt_n",
+ "tx_normal_irq_n",
+ "tx_clean",
+ "tx_set_ic_bit",
+ "tx_tso_frames",
+ "tx_tso_nfrags",
+ "normal_irq_n",
+ "napi_poll",
+};
+#define STMMAC_QSTATS ARRAY_SIZE(stmmac_qstats_string)
+
/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m) \
{ #m, sizeof_field(struct stmmac_counters, m), \
@@ -535,23 +541,44 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int start;
int q, stat;
+ u64 *pos;
char *p;
+ pos = data;
for (q = 0; q < tx_cnt; q++) {
- p = (char *)priv + offsetof(struct stmmac_priv,
- xstats.txq_stats[q].tx_pkt_n);
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[q];
+ struct stmmac_txq_stats snapshot;
+
+ data = pos;
+ do {
+ start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
+ snapshot = tx_q->txq_stats;
+ } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+
+ p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ = (*(unsigned long *)p);
- p += sizeof(unsigned long);
+ *data++ += (*(u64 *)p);
+ p += sizeof(u64);
}
}
+
+ pos = data;
for (q = 0; q < rx_cnt; q++) {
- p = (char *)priv + offsetof(struct stmmac_priv,
- xstats.rxq_stats[q].rx_pkt_n);
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[q];
+ struct stmmac_rxq_stats snapshot;
+
+ data = pos;
+ do {
+ start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
+ snapshot = rx_q->rxq_stats;
+ } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+
+ p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ = (*(unsigned long *)p);
- p += sizeof(unsigned long);
+ *data++ += (*(u64 *)p);
+ p += sizeof(u64);
}
}
}
@@ -562,8 +589,10 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_queues_count = priv->plat->rx_queues_to_use;
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u64 napi_poll = 0, normal_irq_n = 0;
+ int i, j = 0, pos, ret;
unsigned long count;
- int i, j = 0, ret;
+ unsigned int start;
if (priv->dma_cap.asp) {
for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
@@ -574,8 +603,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
}
/* Update the DMA HW counters for dwmac10/100 */
- ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats,
- priv->ioaddr);
+ ret = stmmac_dma_diagnostic_fr(priv, &priv->xstats, priv->ioaddr);
if (ret) {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
@@ -606,6 +634,48 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
}
+
+ pos = j;
+ for (i = 0; i < rx_queues_count; i++) {
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[i];
+ struct stmmac_rxq_stats snapshot;
+
+ j = pos;
+ do {
+ start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
+ snapshot = rx_q->rxq_stats;
+ } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+
+ data[j++] += snapshot.rx_pkt_n;
+ data[j++] += snapshot.rx_normal_irq_n;
+ normal_irq_n += snapshot.rx_normal_irq_n;
+ napi_poll += snapshot.napi_poll;
+ }
+
+ pos = j;
+ for (i = 0; i < tx_queues_count; i++) {
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[i];
+ struct stmmac_txq_stats snapshot;
+
+ j = pos;
+ do {
+ start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
+ snapshot = tx_q->txq_stats;
+ } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+
+ data[j++] += snapshot.tx_pkt_n;
+ data[j++] += snapshot.tx_normal_irq_n;
+ normal_irq_n += snapshot.tx_normal_irq_n;
+ data[j++] += snapshot.tx_clean;
+ data[j++] += snapshot.tx_set_ic_bit;
+ data[j++] += snapshot.tx_tso_frames;
+ data[j++] += snapshot.tx_tso_nfrags;
+ napi_poll += snapshot.napi_poll;
+ }
+ normal_irq_n += priv->xstats.rx_early_irq;
+ data[j++] = normal_irq_n;
+ data[j++] = napi_poll;
+
stmmac_get_per_qstats(priv, &data[j]);
}
@@ -618,7 +688,7 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- len = STMMAC_STATS_LEN +
+ len = STMMAC_STATS_LEN + STMMAC_QSTATS +
STMMAC_TXQ_STATS * tx_cnt +
STMMAC_RXQ_STATS * rx_cnt;
@@ -691,8 +761,11 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
- memcpy(p, stmmac_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ memcpy(p, stmmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < STMMAC_QSTATS; i++) {
+ memcpy(p, stmmac_qstats_string[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
stmmac_get_qstats_string(priv, p);
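
The ethtool paths above read the per-queue counters with the standard u64_stats snapshot loop, retrying if a writer raced with the read. A stand-alone reader-side sketch (names are illustrative, but this is the same pattern as stmmac_get_per_qstats()):

static u64 example_read_tx_pkt_n(struct stmmac_tx_queue *tx_q)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
		val = tx_q->txq_stats.tx_pkt_n;	/* consistent even on 32-bit */
	} while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));

	return val;
}
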
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 8b50f03056b7..540f6a4ec0b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -60,6 +60,48 @@ static void config_sub_second_increment(void __iomem *ioaddr,
*ssinc = data;
}
+static void hwtstamp_correct_latency(struct stmmac_priv *priv)
+{
+ void __iomem *ioaddr = priv->ptpaddr;
+ u32 reg_tsic, reg_tsicsns;
+ u32 reg_tsec, reg_tsecsns;
+ u64 scaled_ns;
+ u32 val;
+
+ /* MAC-internal ingress latency */
+ scaled_ns = readl(ioaddr + PTP_TS_INGR_LAT);
+
+ /* See section 11.7.2.5.3.1 "Ingress Correction" on page 4001 of
+ * i.MX8MP Applications Processor Reference Manual Rev. 1, 06/2021
+ */
+ val = readl(ioaddr + PTP_TCR);
+ if (val & PTP_TCR_TSCTRLSSR)
+ /* nanoseconds field is in decimal format with granularity of 1ns/bit */
+ scaled_ns = ((u64)NSEC_PER_SEC << 16) - scaled_ns;
+ else
+ /* nanoseconds field is in binary format with granularity of ~0.466ns/bit */
+ scaled_ns = ((1ULL << 31) << 16) -
+ DIV_U64_ROUND_CLOSEST(scaled_ns * PSEC_PER_NSEC, 466U);
+
+ reg_tsic = scaled_ns >> 16;
+ reg_tsicsns = scaled_ns & 0xff00;
+
+ /* set bit 31 for 2's complement */
+ reg_tsic |= BIT(31);
+
+ writel(reg_tsic, ioaddr + PTP_TS_INGR_CORR_NS);
+ writel(reg_tsicsns, ioaddr + PTP_TS_INGR_CORR_SNS);
+
+ /* MAC-internal egress latency */
+ scaled_ns = readl(ioaddr + PTP_TS_EGR_LAT);
+
+ reg_tsec = scaled_ns >> 16;
+ reg_tsecsns = scaled_ns & 0xff00;
+
+ writel(reg_tsec, ioaddr + PTP_TS_EGR_CORR_NS);
+ writel(reg_tsecsns, ioaddr + PTP_TS_EGR_CORR_SNS);
+}
+
static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
{
u32 value;
@@ -180,7 +222,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
u64 ptp_time;
int i;
- if (priv->plat->int_snapshot_en) {
+ if (priv->plat->flags & STMMAC_FLAG_INT_SNAPSHOT_EN) {
wake_up(&priv->tstamp_busy_wait);
return;
}
@@ -195,7 +237,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
*/
ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
- if (!priv->plat->ext_snapshot_en)
+ if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
return;
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
@@ -221,4 +263,5 @@ const struct stmmac_hwtimestamp stmmac_ptp = {
.get_systime = get_systime,
.get_ptptime = get_ptptime,
.timestamp_interrupt = timestamp_interrupt,
+ .hwtstamp_correct_latency = hwtstamp_correct_latency,
};
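
hwtstamp_correct_latency() converts the MAC-reported ingress latency (nanoseconds in <<16 fixed point) into a negative correction value. A worked sketch of the digital-rollover branch (PTP_TCR_TSCTRLSSR set), mirroring the arithmetic above with illustrative numbers:

static u32 example_ingress_corr_ns(u32 lat_reg)
{
	u64 scaled_ns = ((u64)NSEC_PER_SEC << 16) - lat_reg;

	/* e.g. a 200 ns latency (lat_reg = 200 << 16) yields
	 * (NSEC_PER_SEC - 200) | BIT(31), i.e. "subtract 200 ns on ingress".
	 */
	return (scaled_ns >> 16) | BIT(31);	/* bit 31 flags the two's-complement value */
}
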
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4727f7be4f86..9a3182b9e767 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -39,6 +39,7 @@
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
@@ -325,7 +326,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_250_300M;
}
- if (priv->plat->has_sun8i) {
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
if (clk_rate > 160000000)
priv->clk_csr = 0x03;
else if (clk_rate > 80000000)
@@ -421,7 +422,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->en_tx_lpi_clockgating);
+ priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
return 0;
}
@@ -909,6 +910,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
+ if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
+ stmmac_hwtstamp_correct_latency(priv, priv);
+
return 0;
}
@@ -991,7 +995,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 old_ctrl, ctrl;
- if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
+ if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup)
priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -1059,7 +1064,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
priv->speed = speed;
if (priv->plat->fix_mac_speed)
- priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
if (!duplex)
ctrl &= ~priv->hw->link.duplex;
@@ -1084,7 +1089,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
priv->eee_active =
- phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
+ phy_init_eee(phy, !(priv->plat->flags &
+ STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -1092,6 +1098,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (priv->dma_cap.fpesel)
stmmac_fpe_link_state_handle(priv, true);
+
+ if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
+ stmmac_hwtstamp_correct_latency(priv, priv);
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
@@ -1110,7 +1119,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->interface;
+ int interface = priv->plat->mac_interface;
if (priv->dma_cap.pcs) {
if ((interface == PHY_INTERFACE_MODE_RGMII) ||
@@ -1144,7 +1153,7 @@ static int stmmac_init_phy(struct net_device *dev)
if (!phylink_expects_phy(priv->phylink))
return 0;
- fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1190,22 +1199,24 @@ static int stmmac_init_phy(struct net_device *dev)
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
- struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
- struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
- int max_speed = priv->plat->max_speed;
+ struct stmmac_mdio_bus_data *mdio_bus_data;
int mode = priv->plat->phy_interface;
+ struct fwnode_handle *fwnode;
struct phylink *phylink;
+ int max_speed;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
- if (priv->plat->mdio_bus_data)
+ priv->phylink_config.mac_managed_pm = true;
+
+ mdio_bus_data = priv->plat->mdio_bus_data;
+ if (mdio_bus_data)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
- if (!fwnode)
- fwnode = dev_fwnode(priv->device);
-
- /* Set the platform/firmware specified interface mode */
+ /* Set the platform/firmware specified interface mode. Note, phylink
+ * deals with the PHY interface mode, not the MAC interface mode.
+ */
__set_bit(mode, priv->phylink_config.supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
@@ -1214,36 +1225,24 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.supported_interfaces);
priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100;
-
- if (!max_speed || max_speed >= 1000)
- priv->phylink_config.mac_capabilities |= MAC_1000;
-
- if (priv->plat->has_gmac4) {
- if (!max_speed || max_speed >= 2500)
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
- } else if (priv->plat->has_xgmac) {
- if (!max_speed || max_speed >= 2500)
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
- if (!max_speed || max_speed >= 5000)
- priv->phylink_config.mac_capabilities |= MAC_5000FD;
- if (!max_speed || max_speed >= 10000)
- priv->phylink_config.mac_capabilities |= MAC_10000FD;
- if (!max_speed || max_speed >= 25000)
- priv->phylink_config.mac_capabilities |= MAC_25000FD;
- if (!max_speed || max_speed >= 40000)
- priv->phylink_config.mac_capabilities |= MAC_40000FD;
- if (!max_speed || max_speed >= 50000)
- priv->phylink_config.mac_capabilities |= MAC_50000FD;
- if (!max_speed || max_speed >= 100000)
- priv->phylink_config.mac_capabilities |= MAC_100000FD;
- }
+ MAC_10FD | MAC_100FD |
+ MAC_1000FD;
/* Half-Duplex can only work with single queue */
- if (priv->plat->tx_queues_to_use > 1)
- priv->phylink_config.mac_capabilities &=
- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
- priv->phylink_config.mac_managed_pm = true;
+ if (priv->plat->tx_queues_to_use <= 1)
+ priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
+ MAC_1000HD;
+
+ /* Get the MAC specific capabilities */
+ stmmac_mac_phylink_get_caps(priv);
+
+ max_speed = priv->plat->max_speed;
+ if (max_speed)
+ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
+ fwnode = priv->plat->port_node;
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
@@ -2432,6 +2431,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct dma_desc *tx_desc = NULL;
struct xdp_desc xdp_desc;
bool work_done = true;
+ u32 tx_set_ic_bit = 0;
+ unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2492,7 +2493,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- priv->xstats.tx_set_ic_bit++;
+ tx_set_ic_bit++;
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
@@ -2504,6 +2505,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2545,11 +2549,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
+ u32 tx_packets = 0, tx_errors = 0;
+ unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
- priv->xstats.tx_clean++;
-
tx_q->xsk_frames_done = 0;
entry = tx_q->dirty_tx;
@@ -2580,8 +2584,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
else
p = tx_q->dma_tx + entry;
- status = stmmac_tx_status(priv, &priv->dev->stats,
- &priv->xstats, p, priv->ioaddr);
+ status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
/* Check if the descriptor is owned by the DMA */
if (unlikely(status & tx_dma_own))
break;
@@ -2597,13 +2600,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (likely(!(status & tx_not_ls))) {
/* ... verify the status error condition */
if (unlikely(status & tx_err)) {
- priv->dev->stats.tx_errors++;
+ tx_errors++;
if (unlikely(status & tx_err_bump_tc))
stmmac_bump_dma_threshold(priv, queue);
} else {
- priv->dev->stats.tx_packets++;
- priv->xstats.tx_pkt_n++;
- priv->xstats.txq_stats[queue].tx_pkt_n++;
+ tx_packets++;
}
if (skb)
stmmac_get_tx_hwtstamp(priv, p, skb);
@@ -2707,6 +2708,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
HRTIMER_MODE_REL);
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_packets += tx_packets;
+ tx_q->txq_stats.tx_pkt_n += tx_packets;
+ tx_q->txq_stats.tx_clean++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+
+ priv->xstats.tx_errors += tx_errors;
+
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
/* Combine decisions from TX clean and XSK TX */
@@ -2734,7 +2743,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
tx_q->dma_tx_phy, chan);
stmmac_start_tx_dma(priv, chan);
- priv->dev->stats.tx_errors++;
+ priv->xstats.tx_errors++;
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
@@ -3710,7 +3719,7 @@ static int stmmac_request_irq(struct net_device *dev)
int ret;
/* Request the IRQ lines */
- if (priv->plat->multi_msi_en)
+ if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
ret = stmmac_request_irq_multi_msi(dev);
else
ret = stmmac_request_irq_single(dev);
@@ -3827,10 +3836,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- /* Extra statistics */
- memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
- priv->xstats.threshold = tc;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
buf_sz = dma_conf->dma_buf_sz;
@@ -3838,7 +3843,8 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
- if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: Serdes powerup failed\n",
@@ -4110,6 +4116,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
+ unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4258,7 +4265,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
}
/* We've used all descriptors we need for this skb, however,
@@ -4274,9 +4280,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- dev->stats.tx_bytes += skb->len;
- priv->xstats.tx_tso_frames++;
- priv->xstats.tx_tso_nfrags += nfrags;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_bytes += skb->len;
+ tx_q->txq_stats.tx_tso_frames++;
+ tx_q->txq_stats.tx_tso_nfrags += nfrags;
+ if (set_ic)
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4326,7 +4336,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
- priv->dev->stats.tx_dropped++;
+ priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -4352,6 +4362,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
+ unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4480,7 +4491,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
}
/* We've used all descriptors we need for this skb, however,
@@ -4507,7 +4517,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- dev->stats.tx_bytes += skb->len;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_bytes += skb->len;
+ if (set_ic)
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4569,7 +4583,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
dev_kfree_skb(skb);
- priv->dev->stats.tx_dropped++;
+ priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -4770,9 +4784,12 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
+ unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- priv->xstats.tx_set_ic_bit++;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -4917,16 +4934,18 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
+ unsigned long flags;
struct sk_buff *skb;
u32 hash;
skb = stmmac_construct_skb_zc(ch, xdp);
if (!skb) {
- priv->dev->stats.rx_dropped++;
+ priv->xstats.rx_dropped++;
return;
}
@@ -4945,8 +4964,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += len;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_pkt_n++;
+ rx_q->rxq_stats.rx_bytes += len;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5023,9 +5044,11 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
+ u32 rx_errors = 0, rx_dropped = 0;
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
+ unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5081,8 +5104,7 @@ read_again:
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
- status = stmmac_rx_status(priv, &priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -5104,8 +5126,7 @@ read_again:
break;
if (priv->extend_desc)
- stmmac_rx_extended_status(priv, &priv->dev->stats,
- &priv->xstats,
+ stmmac_rx_extended_status(priv, &priv->xstats,
rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
xsk_buff_free(buf->xdp);
@@ -5113,7 +5134,7 @@ read_again:
dirty++;
error = 1;
if (!priv->hwts_rx_en)
- priv->dev->stats.rx_errors++;
+ rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
@@ -5161,7 +5182,7 @@ read_again:
break;
case STMMAC_XDP_CONSUMED:
xsk_buff_free(buf->xdp);
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
break;
case STMMAC_XDP_TX:
case STMMAC_XDP_REDIRECT:
@@ -5182,8 +5203,12 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- priv->xstats.rx_pkt_n += count;
- priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
if (failure || stmmac_rx_dirty(priv, queue) > 0)
@@ -5207,6 +5232,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
+ u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
@@ -5216,6 +5242,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
+ unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5271,8 +5298,7 @@ read_again:
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
- status = stmmac_rx_status(priv, &priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -5289,14 +5315,13 @@ read_again:
prefetch(np);
if (priv->extend_desc)
- stmmac_rx_extended_status(priv, &priv->dev->stats,
- &priv->xstats, rx_q->dma_erx + entry);
+ stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
- priv->dev->stats.rx_errors++;
+ rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
@@ -5364,7 +5389,7 @@ read_again:
virt_to_head_page(ctx.xdp.data),
sync_len, true);
buf->page = NULL;
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
/* Clear skb as it was set as
* status by XDP program.
@@ -5393,7 +5418,7 @@ read_again:
skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
count++;
goto drain_data;
}
@@ -5413,7 +5438,7 @@ read_again:
priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5425,7 +5450,7 @@ read_again:
priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -5453,8 +5478,8 @@ drain_data:
napi_gro_receive(&ch->rx_napi, skb);
skb = NULL;
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += len;
+ rx_packets++;
+ rx_bytes += len;
count++;
}
@@ -5469,8 +5494,14 @@ drain_data:
stmmac_rx_refill(priv, queue);
- priv->xstats.rx_pkt_n += count;
- priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_packets += rx_packets;
+ rx_q->rxq_stats.rx_bytes += rx_bytes;
+ rx_q->rxq_stats.rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
return count;
}
@@ -5480,10 +5511,15 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_rx_queue *rx_q;
u32 chan = ch->index;
+ unsigned long flags;
int work_done;
- priv->xstats.napi_poll++;
+ rx_q = &priv->dma_conf.rx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5502,10 +5538,15 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
+ unsigned long flags;
int work_done;
- priv->xstats.napi_poll++;
+ tx_q = &priv->dma_conf.tx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
@@ -5527,9 +5568,20 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
int rx_done, tx_done, rxtx_done;
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
+ unsigned long flags;
+
+ rx_q = &priv->dma_conf.rx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
- priv->xstats.napi_poll++;
+ tx_q = &priv->dma_conf.tx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
tx_done = stmmac_tx_clean(priv, budget, chan);
tx_done = min(tx_done, budget);
@@ -5677,7 +5729,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
features &= ~NETIF_F_CSUM_MASK;
/* Disable tso if asked by ethtool */
- if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
if (features & NETIF_F_TSO)
priv->tso = true;
else
@@ -5798,7 +5850,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
}
/* PCS link status */
- if (priv->hw->pcs && !priv->plat->has_integrated_pcs) {
+ if (priv->hw->pcs &&
+ !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
if (priv->xstats.pcs_link)
netif_carrier_on(priv->dev);
else
@@ -5951,7 +6004,7 @@ static void stmmac_poll_controller(struct net_device *dev)
if (test_bit(STMMAC_DOWN, &priv->state))
return;
- if (priv->plat->multi_msi_en) {
+ if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
@@ -6174,6 +6227,22 @@ DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
+ static const char * const dwxgmac_timestamp_source[] = {
+ "None",
+ "Internal",
+ "External",
+ "Both",
+ };
+ static const char * const dwxgmac_safety_feature_desc[] = {
+ "No",
+ "All Safety Features with ECC and Parity",
+ "All Safety Features without ECC or Parity",
+ "All Safety Features with Parity Only",
+ "ECC Only",
+ "UNDEFINED",
+ "UNDEFINED",
+ "UNDEFINED",
+ };
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -6192,10 +6261,16 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.mbps_1000) ? "Y" : "N");
seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
- seq_printf(seq, "\tHash Filter: %s\n",
- (priv->dma_cap.hash_filter) ? "Y" : "N");
- seq_printf(seq, "\tMultiple MAC address registers: %s\n",
- (priv->dma_cap.multi_addr) ? "Y" : "N");
+ if (priv->plat->has_xgmac) {
+ seq_printf(seq,
+ "\tNumber of Additional MAC address registers: %d\n",
+ priv->dma_cap.multi_addr);
+ } else {
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ }
seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
(priv->dma_cap.pcs) ? "Y" : "N");
seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
@@ -6210,12 +6285,16 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
+ if (priv->plat->has_xgmac)
+ seq_printf(seq, "\tTimestamp System Time Source: %s\n",
+ dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
(priv->dma_cap.eee) ? "Y" : "N");
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
+ priv->plat->has_xgmac) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
@@ -6223,9 +6302,9 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.rx_coe_type1) ? "Y" : "N");
seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
(priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
}
- seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
- (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
priv->dma_cap.number_rx_channel);
seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
@@ -6238,12 +6317,13 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.enh_desc) ? "Y" : "N");
seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
- seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
+ (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
priv->dma_cap.pps_out_num);
seq_printf(seq, "\tSafety Features: %s\n",
- priv->dma_cap.asp ? "Y" : "N");
+ dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
seq_printf(seq, "\tFlexible RX Parser: %s\n",
priv->dma_cap.frpsel ? "Y" : "N");
seq_printf(seq, "\tEnhanced Addressing: %d\n",
@@ -6268,6 +6348,53 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
priv->dma_cap.fpesel ? "Y" : "N");
seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
priv->dma_cap.tbssel ? "Y" : "N");
+ seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
+ priv->dma_cap.tbs_ch_num);
+ seq_printf(seq, "\tPer-Stream Filtering: %s\n",
+ priv->dma_cap.sgfsel ? "Y" : "N");
+ seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
+ BIT(priv->dma_cap.ttsfd) >> 1);
+ seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
+ priv->dma_cap.numtc);
+ seq_printf(seq, "\tDCB Feature: %s\n",
+ priv->dma_cap.dcben ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
+ priv->dma_cap.advthword ? "Y" : "N");
+ seq_printf(seq, "\tPTP Offload: %s\n",
+ priv->dma_cap.ptoen ? "Y" : "N");
+ seq_printf(seq, "\tOne-Step Timestamping: %s\n",
+ priv->dma_cap.osten ? "Y" : "N");
+ seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
+ priv->dma_cap.pfcen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
+ BIT(priv->dma_cap.frpes) << 6);
+ seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
+ BIT(priv->dma_cap.frpbs) << 6);
+ seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
+ priv->dma_cap.frppipe_num);
+ seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
+ priv->dma_cap.nrvf_num ?
+ (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
+ seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
+ priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
+ seq_printf(seq, "\tDepth of GCL: %lu\n",
+ priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
+ seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
+ priv->dma_cap.cbtisel ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
+ priv->dma_cap.aux_snapshot_n);
+ seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
+ priv->dma_cap.pou_ost_en ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced DMA: %s\n",
+ priv->dma_cap.edma ? "Y" : "N");
+ seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
+ priv->dma_cap.ediffc ? "Y" : "N");
+ seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
+ priv->dma_cap.vxn ? "Y" : "N");
+ seq_printf(seq, "\tDebug Memory Interface: %s\n",
+ priv->dma_cap.dbgmem ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
+ priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
@@ -6788,6 +6915,56 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
return 0;
}
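+/* Aggregate the per-queue counters kept under u64_stats syncp into
+ * rtnl_link_stats64. On 32-bit systems readers retry whenever a writer
+ * holding u64_stats_update_begin_irqsave() on the same syncp was
+ * mid-update, so the 64-bit counters stay consistent without extra locking.
+ */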
+static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int start;
+ int q;
+
+ for (q = 0; q < tx_cnt; q++) {
+ struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ tx_packets = txq_stats->tx_packets;
+ tx_bytes = txq_stats->tx_bytes;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ for (q = 0; q < rx_cnt; q++) {
+ struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ rx_packets = rxq_stats->rx_packets;
+ rx_bytes = rxq_stats->rx_bytes;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ }
+
+ stats->rx_dropped = priv->xstats.rx_dropped;
+ stats->rx_errors = priv->xstats.rx_errors;
+ stats->tx_dropped = priv->xstats.tx_dropped;
+ stats->tx_errors = priv->xstats.tx_errors;
+ stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
+ stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
+ stats->rx_length_errors = priv->xstats.rx_length;
+ stats->rx_crc_errors = priv->xstats.rx_crc_errors;
+ stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
+ stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
+}
+
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
.ndo_start_xmit = stmmac_xmit,
@@ -6798,6 +6975,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_eth_ioctl = stmmac_ioctl,
+ .ndo_get_stats64 = stmmac_get_stats64,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6855,7 +7033,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
int ret;
/* dwmac-sun8i only work in chain mode */
- if (priv->plat->has_sun8i)
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
chain_mode = 1;
priv->chain_mode = chain_mode;
@@ -6876,7 +7054,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
*/
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
- !priv->plat->use_phy_wol;
+ !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
@@ -6920,7 +7098,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
- priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q_en =
+ (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
/* Run HW quirks, if any */
@@ -7160,12 +7339,18 @@ int stmmac_dvr_probe(struct device *device,
priv->device = device;
priv->dev = ndev;
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+ u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
+
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
- priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
+ priv->plat->dma_cfg->multi_msi_en =
+ (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
@@ -7249,7 +7434,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features |= NETIF_F_HW_TC;
}
- if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
if (priv->plat->has_gmac4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
@@ -7257,7 +7442,8 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
- if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
+ if (priv->dma_cap.sphen &&
+ !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph_cap = true;
priv->sph = priv->sph_cap;
@@ -7315,6 +7501,8 @@ int stmmac_dvr_probe(struct device *device,
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ priv->xstats.threshold = tc;
+
/* Initialize RSS */
rxq = priv->plat->rx_queues_to_use;
netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
@@ -7621,7 +7809,8 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
- if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(ndev,
priv->plat->bsp_priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 3db1cb0fd160..fa9e7e7040b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -62,11 +62,16 @@ static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
static void stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
int phyreg, u32 *hw_addr)
{
- u32 tmp;
+ u32 tmp = 0;
+ if (priv->synopsys_id < DWXGMAC_CORE_2_20) {
+ /* Until ver 2.20 XGMAC does not support C22 addr >= 4. Those
+ * bits above bit 3 of XGMAC_MDIO_C22P register are reserved.
+ */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~MII_XGMAC_C22P_MASK;
+ }
/* Set port as Clause 22 */
- tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
- tmp &= ~MII_XGMAC_C22P_MASK;
tmp |= BIT(phyaddr);
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
@@ -132,8 +137,9 @@ static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
priv = netdev_priv(ndev);
- /* HW does not support C22 addr >= 4 */
- if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ /* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
+ if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
+ phyaddr > MII_XGMAC_MAX_C22ADDR)
return -ENODEV;
stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
@@ -209,8 +215,9 @@ static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
priv = netdev_priv(ndev);
- /* HW does not support C22 addr >= 4 */
- if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ /* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
+ if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
+ phyaddr > MII_XGMAC_MAX_C22ADDR)
return -ENODEV;
stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
@@ -526,11 +533,11 @@ int stmmac_mdio_register(struct net_device *ndev)
int err = 0;
struct mii_bus *new_bus;
struct stmmac_priv *priv = netdev_priv(ndev);
- struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *mdio_node = priv->plat->mdio_node;
struct device *dev = ndev->dev.parent;
struct fwnode_handle *fixed_node;
+ struct fwnode_handle *fwnode;
int addr, found, max_addr;
if (!mdio_bus_data)
@@ -551,13 +558,18 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45;
new_bus->write_c45 = &stmmac_xgmac2_mdio_write_c45;
- /* Right now only C22 phys are supported */
- max_addr = MII_XGMAC_MAX_C22ADDR + 1;
+ if (priv->synopsys_id < DWXGMAC_CORE_2_20) {
+ /* Right now only C22 phys are supported */
+ max_addr = MII_XGMAC_MAX_C22ADDR + 1;
- /* Check if DT specified an unsupported phy addr */
- if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
- dev_err(dev, "Unsupported phy_addr (max=%d)\n",
+ /* Check if DT specified an unsupported phy addr */
+ if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
+ dev_err(dev, "Unsupported phy_addr (max=%d)\n",
MII_XGMAC_MAX_C22ADDR);
+ } else {
+ /* XGMAC version 2.20 onwards support 32 phy addr */
+ max_addr = PHY_MAX_ADDR;
+ }
} else {
new_bus->read = &stmmac_mdio_read_c22;
new_bus->write = &stmmac_mdio_write_c22;
@@ -589,6 +601,7 @@ int stmmac_mdio_register(struct net_device *ndev)
stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0);
/* If fixed-link is set, skip PHY scanning */
+ fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 644bb54f5f02..352b01678c22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -77,7 +77,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->clk_csr = 5;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 1;
- plat->tso_en = 1;
+ plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
/* Set default value for multicast hash bins */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 231152ee5a32..35f4b1484029 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include "stmmac.h"
@@ -420,16 +419,16 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
- plat->interface = stmmac_of_get_mac_mode(np);
- if (plat->interface < 0)
- plat->interface = plat->phy_interface;
+ plat->mac_interface = stmmac_of_get_mac_mode(np);
+ if (plat->mac_interface < 0)
+ plat->mac_interface = plat->phy_interface;
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* PHYLINK automatically parses the phy-handle property */
- plat->phylink_node = np;
+ plat->port_node = of_fwnode_handle(np);
/* Get max speed of operation from device tree */
of_property_read_u32(np, "max-speed", &plat->max_speed);
@@ -466,8 +465,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- plat->en_tx_lpi_clockgating =
- of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
/* Set the maxmtu to a default of JUMBO_LEN in case the
* parameter is not present in the device tree.
@@ -525,7 +524,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
- plat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_property_read_bool(np, "snps,tso"))
+ plat->flags |= STMMAC_FLAG_TSO_EN;
}
if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
@@ -538,7 +538,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_device_is_compatible(np, "snps,dwxgmac")) {
plat->has_xgmac = 1;
plat->pmt = 1;
- plat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_property_read_bool(np, "snps,tso"))
+ plat->flags |= STMMAC_FLAG_TSO_EN;
}
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b4388ca8d211..3d7825cb30bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -192,7 +192,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
case PTP_CLK_REQ_EXTTS:
- priv->plat->ext_snapshot_en = on;
+ if (on)
+ priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ else
+ priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN;
mutex_lock(&priv->aux_ts_lock);
acr_value = readl(ptpaddr + PTP_ACR);
acr_value &= ~PTP_ACR_MASK;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index bf619295d079..d1fe4b46f162 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -26,6 +26,12 @@
#define PTP_ACR 0x40 /* Auxiliary Control Reg */
#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */
#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */
+#define PTP_TS_INGR_CORR_NS 0x58 /* Ingress timestamp correction nanoseconds */
+#define PTP_TS_EGR_CORR_NS 0x5C /* Egress timestamp correction nanoseconds */
+#define PTP_TS_INGR_CORR_SNS 0x60 /* Ingress timestamp correction subnanoseconds */
+#define PTP_TS_EGR_CORR_SNS 0x64 /* Egress timestamp correction subnanoseconds */
+#define PTP_TS_INGR_LAT 0x68 /* MAC internal Ingress Latency */
+#define PTP_TS_EGR_LAT 0x6c /* MAC internal Egress Latency */
#define PTP_STNSUR_ADDSUB_SHIFT 31
#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 687f43cd466c..f9e43fc32ee8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1355,7 +1355,7 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
goto cleanup_rss;
}
- dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
cls = kzalloc(sizeof(*cls), GFP_KERNEL);
@@ -1481,8 +1481,8 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
goto cleanup_rss;
}
- dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
- dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
+ dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_BASIC);
+ dissector->used_keys |= (1ULL << FLOW_DISSECTOR_KEY_PORTS);
dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 734a817d3c94..a9a6670b5ff1 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -124,7 +124,7 @@ static void vsw_set_rx_mode(struct net_device *dev)
return sunvnet_set_rx_mode_common(dev, port->vp);
}
-int ldmvsw_open(struct net_device *dev)
+static int ldmvsw_open(struct net_device *dev)
{
struct vnet_port *port = netdev_priv(dev);
struct vio_driver_state *vio = &port->vio;
@@ -136,7 +136,6 @@ int ldmvsw_open(struct net_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(ldmvsw_open);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vsw_poll_controller(struct net_device *dev)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 7a2e76776297..011d74087f86 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -31,7 +31,7 @@
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "niu.h"
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 34b94153bf0c..cc34d92d2e3d 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -25,7 +25,7 @@
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <asm/auxio.h>
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 4154e68639ac..9bd1df8308d2 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -40,6 +40,7 @@
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/gfp.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/byteorder.h>
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b93613cd1994..b983b9c23be6 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -32,9 +32,10 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 6418fcc3139f..b37360f44972 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -27,8 +27,8 @@
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pgtable.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index fce06663e1e1..88b5b1b47779 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -183,4 +183,29 @@ config CPMAC
help
TI AR7 CPMAC Ethernet support
+config TI_ICSSG_PRUETH
+ tristate "TI Gigabit PRU Ethernet driver"
+ select PHYLIB
+ select TI_ICSS_IEP
+ depends on PRU_REMOTEPROC
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ help
+ Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
+ This subsystem is available starting with the AM65 platform.
+
+ This driver requires firmware binaries which will run on the PRUs
+ to support the Ethernet operation. Currently, it supports Ethernet
+ with 1G and 100M link speed.
+
+config TI_ICSS_IEP
+ tristate "TI PRU ICSS IEP driver"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ This driver enables support for the PRU-ICSS Industrial Ethernet
+ Peripheral within a PRU-ICSS subsystem present on various TI SoCs.
+
+ To compile this driver as a module, choose M here. The module
+ will be called icss_iep.
+
endif # NET_VENDOR_TI
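For reference, a minimal .config fragment to build the two new drivers above as modules; this assumes ARCH_K3 together with the PRU remoteproc, PRUSS and K3 UDMA glue layer prerequisites named in the dependencies are already enabled:

CONFIG_TI_ICSS_IEP=m
CONFIG_TI_ICSSG_PRUETH=m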
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 75f761efbea7..34fd7a716ba6 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -28,3 +28,14 @@ obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
+
+obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
+icssg-prueth-y := k3-cppi-desc-pool.o \
+ icssg/icssg_prueth.o \
+ icssg/icssg_classifier.o \
+ icssg/icssg_queues.o \
+ icssg/icssg_config.o \
+ icssg/icssg_mii_cfg.o \
+ icssg/icssg_stats.o \
+ icssg/icssg_ethtool.o
+obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index bebcfd5e6b57..bea6fc0f324c 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -19,6 +19,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index bf40c88fbd9b..f3dad2ab9828 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -192,7 +192,6 @@ struct am65_cpsw_ndev_priv {
extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
-void am65_cpsw_nuss_adjust_link(struct net_device *ndev);
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index eced87fa261c..9ac2ff05d501 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -624,9 +624,9 @@ static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
int ret;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported keys used");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index bfa81bbfce3f..26dc906eae90 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -3,7 +3,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 25e707d7b87c..4edb7963f856 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -12,7 +12,6 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include "cpsw.h"
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f9cd566d1c9b..ca4d4548f85e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -31,7 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index c61e4e44a78f..0e4f526b1753 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -30,7 +30,7 @@
#include <linux/sys_soc.h>
#include <net/switchdev.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index e966dd47e2db..0ec85635dfd6 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
@@ -1396,9 +1396,9 @@ static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
int ret;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
NL_SET_ERR_MSG_MOD(extack,
"Unsupported keys used");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 34230145ca0b..0e27c433098d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -6,6 +6,7 @@
#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
#define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+#include <net/xdp.h>
#include <uapi/linux/bpf.h>
#include "davinci_cpdma.h"
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 23169e36a3d4..89b6d23e9937 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -23,7 +23,6 @@
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mdio-bitbang.h>
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
new file mode 100644
index 000000000000..4cf2a52e4378
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/timekeeping.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include "icss_iep.h"
+
+#define IEP_MAX_DEF_INC 0xf
+#define IEP_MAX_COMPEN_INC 0xfff
+#define IEP_MAX_COMPEN_COUNT 0xffffff
+
+#define IEP_GLOBAL_CFG_CNT_ENABLE BIT(0)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK GENMASK(7, 4)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT 4
+#define IEP_GLOBAL_CFG_COMPEN_INC_MASK GENMASK(19, 8)
+#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT 8
+
+#define IEP_GLOBAL_STATUS_CNT_OVF BIT(0)
+
+#define IEP_CMP_CFG_SHADOW_EN BIT(17)
+#define IEP_CMP_CFG_CMP0_RST_CNT_EN BIT(0)
+#define IEP_CMP_CFG_CMP_EN(cmp) (GENMASK(16, 1) & (1 << ((cmp) + 1)))
+
+#define IEP_CMP_STATUS(cmp) (1 << (cmp))
+
+#define IEP_SYNC_CTRL_SYNC_EN BIT(0)
+#define IEP_SYNC_CTRL_SYNC_N_EN(n) (GENMASK(2, 1) & (BIT(1) << (n)))
+
+#define IEP_MIN_CMP 0
+#define IEP_MAX_CMP 15
+
+#define ICSS_IEP_64BIT_COUNTER_SUPPORT BIT(0)
+#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT BIT(1)
+#define ICSS_IEP_SHADOW_MODE_SUPPORT BIT(2)
+
+#define LATCH_INDEX(ts_index) ((ts_index) + 6)
+#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
+#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
+
+enum {
+ ICSS_IEP_GLOBAL_CFG_REG,
+ ICSS_IEP_GLOBAL_STATUS_REG,
+ ICSS_IEP_COMPEN_REG,
+ ICSS_IEP_SLOW_COMPEN_REG,
+ ICSS_IEP_COUNT_REG0,
+ ICSS_IEP_COUNT_REG1,
+ ICSS_IEP_CAPTURE_CFG_REG,
+ ICSS_IEP_CAPTURE_STAT_REG,
+
+ ICSS_IEP_CAP6_RISE_REG0,
+ ICSS_IEP_CAP6_RISE_REG1,
+
+ ICSS_IEP_CAP7_RISE_REG0,
+ ICSS_IEP_CAP7_RISE_REG1,
+
+ ICSS_IEP_CMP_CFG_REG,
+ ICSS_IEP_CMP_STAT_REG,
+ ICSS_IEP_CMP0_REG0,
+ ICSS_IEP_CMP0_REG1,
+ ICSS_IEP_CMP1_REG0,
+ ICSS_IEP_CMP1_REG1,
+
+ ICSS_IEP_CMP8_REG0,
+ ICSS_IEP_CMP8_REG1,
+ ICSS_IEP_SYNC_CTRL_REG,
+ ICSS_IEP_SYNC0_STAT_REG,
+ ICSS_IEP_SYNC1_STAT_REG,
+ ICSS_IEP_SYNC_PWIDTH_REG,
+ ICSS_IEP_SYNC0_PERIOD_REG,
+ ICSS_IEP_SYNC1_DELAY_REG,
+ ICSS_IEP_SYNC_START_REG,
+ ICSS_IEP_MAX_REGS,
+};
+
+/**
+ * struct icss_iep_plat_data - Plat data to handle SoC variants
+ * @config: Regmap configuration data
+ * @reg_offs: register offsets to capture offset differences across SoCs
+ * @flags: Flags to represent IEP properties
+ */
+struct icss_iep_plat_data {
+ struct regmap_config *config;
+ u32 reg_offs[ICSS_IEP_MAX_REGS];
+ u32 flags;
+};
+
+struct icss_iep {
+ struct device *dev;
+ void __iomem *base;
+ const struct icss_iep_plat_data *plat_data;
+ struct regmap *map;
+ struct device_node *client_np;
+ unsigned long refclk_freq;
+ int clk_tick_time; /* one refclk tick time in ns */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct mutex ptp_clk_mutex; /* PHC access serializer */
+ spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
+ u32 def_inc;
+ s16 slow_cmp_inc;
+ u32 slow_cmp_count;
+ const struct icss_iep_clockops *ops;
+ void *clockops_data;
+ u32 cycle_time_ns;
+ u32 perout_enabled;
+ bool pps_enabled;
+ int cap_cmp_irq;
+ u64 period;
+ u32 latch_enable;
+};
+
+/**
+ * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: upper 32 bit IEP counter
+ */
+int icss_iep_get_count_hi(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
+ val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
+
+/**
+ * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: lower 32 bit IEP counter
+ */
+int icss_iep_get_count_low(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep)
+ val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
+
+/**
+ * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: PTP clock index, -1 if not registered
+ */
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
+{
+ if (!iep || !iep->ptp_clock)
+ return -1;
+ return ptp_clock_index(iep->ptp_clock);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
+
+static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
+{
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns), iep->base +
+ iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+ writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
+
+/**
+ * icss_iep_settime() - Set time of the PTP clock using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ * @ns: Time to be set in nanoseconds
+ *
+ * This API uses writel() instead of regmap_write() for write operations as
+ * regmap_write() is too slow and this API is time sensitive.
+ */
+static void icss_iep_settime(struct icss_iep *iep, u64 ns)
+{
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->settime) {
+ iep->ops->settime(iep->clockops_data, ns);
+ return;
+ }
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ if (iep->pps_enabled || iep->perout_enabled)
+ writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
+
+ icss_iep_set_counter(iep, ns);
+
+ if (iep->pps_enabled || iep->perout_enabled) {
+ icss_iep_update_to_next_boundary(iep, ns);
+ writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
+ iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
+ }
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+}
+
+/**
+ * icss_iep_gettime() - Get time of the PTP clock using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ * @sts: Pointer to structure representing PTP system timestamp.
+ *
+ * This API uses readl() instead of regmap_read() for read operations as
+ * regmap_read() is too slow and this API is time sensitive.
+ *
+ * Return: The current timestamp of the PTP clock using IEP driver
+ */
+static u64 icss_iep_gettime(struct icss_iep *iep,
+ struct ptp_system_timestamp *sts)
+{
+ u32 ts_hi = 0, ts_lo;
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->gettime)
+ return iep->ops->gettime(iep->clockops_data, sts);
+
+ /* use local_irq_x() to make it work for both RT/non-RT */
+ local_irq_save(flags);
+
+ /* no need to play with hi-lo, hi is latched when lo is read */
+ ptp_read_system_prets(sts);
+ ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+ ptp_read_system_postts(sts);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+
+ local_irq_restore(flags);
+
+ return (u64)ts_lo | (u64)ts_hi << 32;
+}
+
+static void icss_iep_enable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ IEP_GLOBAL_CFG_CNT_ENABLE);
+}
+
+static void icss_iep_disable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ 0);
+}
+
+static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
+{
+ u32 cycle_time;
+ int cmp;
+
+ cycle_time = iep->cycle_time_ns - iep->def_inc;
+
+ icss_iep_disable(iep);
+
+ /* disable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, 0);
+
+ /* enable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);
+
+ /* clear counters */
+ icss_iep_set_counter(iep, 0);
+
+ /* clear overflow status */
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
+ IEP_GLOBAL_STATUS_CNT_OVF,
+ IEP_GLOBAL_STATUS_CNT_OVF);
+
+ /* clear compare status */
+ for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
+ IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+ }
+
+ /* enable reset counter on CMP0 event */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN);
+ /* enable compare */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(0),
+ IEP_CMP_CFG_CMP_EN(0));
+
+ /* set CMP0 value to cycle time */
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);
+
+ icss_iep_set_counter(iep, 0);
+ icss_iep_enable(iep);
+}
+
+static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
+ def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_inc > IEP_MAX_COMPEN_INC) {
+ dev_err(dev, "%s: too high compensation inc %d\n",
+ __func__, compen_inc);
+ compen_inc = IEP_MAX_COMPEN_INC;
+ }
+
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_COMPEN_INC_MASK,
+ compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_count > IEP_MAX_COMPEN_COUNT) {
+ dev_err(dev, "%s: too high compensation count %d\n",
+ __func__, compen_count);
+ compen_count = IEP_MAX_COMPEN_COUNT;
+ }
+
+ regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
+}
+
+static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
+}
+
+/* PTP PHC operations */
+static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ u32 cyc_count;
+ u16 cmp_inc;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ /* ppb is the requested frequency adjustment in parts per billion,
+ * e.g. 100 ppb means we need to speed the clock up by 100 Hz,
+ * i.e. after 1 second (1 billion ns) of clock time we should have
+ * counted 100 more ns.
+ * We use IEP slow compensation to achieve continuous frequency
+ * adjustment. There are two parts: the compensation cycle time and
+ * the adjustment applied per cycle. In the simplest case of a 1 sec
+ * cycle time, the adjustment per cycle would be (def_inc + ppb).
+ * The cycle time has to be chosen based on how large the ppb is:
+ * the smaller the ppb, the larger the cycle time.
+ * The minimum adjustment we can do is +-1ns per cycle so let's
+ * reduce the cycle time to get 1ns per cycle adjustment.
+ * 1ppb = 1sec cycle time & 1ns adjust
+ * 1000ppb = 1/1000 cycle time & 1ns adjust per cycle
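+ *
+ * Worked example (illustrative, assuming a 250 MHz refclk, i.e.
+ * def_inc = 4 ns, and cycle_time_ns = 0): for ppb = 100 we use
+ * slow_cmp_inc = 1 and cyc_count = NSEC_PER_SEC / 100 / 4 = 2500000,
+ * so one compensation event fires every 2.5M refclk ticks (10 ms),
+ * each adding 1 ns, i.e. roughly 100 extra ns per second.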
+ */
+
+ if (iep->cycle_time_ns)
+ iep->slow_cmp_inc = iep->clk_tick_time; /* 4ns adj per cycle */
+ else
+ iep->slow_cmp_inc = 1; /* 1ns adjust per cycle */
+
+ if (ppb < 0) {
+ iep->slow_cmp_inc = -iep->slow_cmp_inc;
+ ppb = -ppb;
+ }
+
+ cyc_count = NSEC_PER_SEC; /* 1s cycle time @1GHz */
+ cyc_count /= ppb; /* cycle time per ppb */
+
+ /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
+ if (!iep->cycle_time_ns)
+ cyc_count /= iep->clk_tick_time;
+ iep->slow_cmp_count = cyc_count;
+
+ /* iep->clk_tick_time is def_inc */
+ cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
+ icss_iep_set_compensation_inc(iep, cmp_inc);
+ icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
+
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ s64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ if (iep->ops && iep->ops->adjtime) {
+ iep->ops->adjtime(iep->clockops_data, delta);
+ } else {
+ ns = icss_iep_gettime(iep, NULL);
+ ns += delta;
+ icss_iep_settime(iep, ns);
+ }
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = icss_iep_gettime(iep, sts);
+ *ts = ns_to_timespec64(ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = timespec64_to_ns(ts);
+ icss_iep_settime(iep, ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
+{
+ u64 ns, p_ns;
+ u32 offset;
+
+ ns = icss_iep_gettime(iep, NULL);
+ if (start_ns < ns)
+ start_ns = ns;
+ p_ns = iep->period;
+ /* Round up to next period boundary */
+ start_ns += p_ns - 1;
+ offset = do_div(start_ns, p_ns);
+ start_ns = start_ns * p_ns;
+ /* If it is too close to update, shift to next boundary */
+ if (p_ns - offset < 10)
+ start_ns += p_ns;
+
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
+}
+
+static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ int ret;
+ u64 cmp;
+
+ if (iep->ops && iep->ops->perout_enable) {
+ ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
+ if (ret)
+ return ret;
+
+ if (on) {
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC, 1ms pulse width */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+ }
+ } else {
+ if (on) {
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+ }
+ }
+
+ return 0;
+}
+
+static int icss_iep_perout_enable(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->pps_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->perout_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ ret = icss_iep_perout_enable_hw(iep, req, on);
+ if (!ret)
+ iep->perout_enabled = !!on;
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_pps_enable(struct icss_iep *iep, int on)
+{
+ struct ptp_clock_request rq;
+ struct timespec64 ts;
+ unsigned long flags;
+ int ret = 0;
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->perout_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->pps_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+
+ rq.perout.index = 0;
+ if (on) {
+ ns = icss_iep_gettime(iep, NULL);
+ ts = ns_to_timespec64(ns);
+ rq.perout.period.sec = 1;
+ rq.perout.period.nsec = 0;
+ rq.perout.start.sec = ts.tv_sec + 2;
+ rq.perout.start.nsec = 0;
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ } else {
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ }
+
+ if (!ret)
+ iep->pps_enabled = !!on;
+
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
+{
+ u32 val, cap;
+ int ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->ops && iep->ops->extts_enable) {
+ ret = iep->ops->extts_enable(iep->clockops_data, index, on);
+ goto exit;
+ }
+
+ if (((iep->latch_enable & BIT(index)) >> index) == on)
+ goto exit;
+
+ regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
+ cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
+ if (on) {
+ val |= cap;
+ iep->latch_enable |= BIT(index);
+ } else {
+ val &= ~cap;
+ iep->latch_enable &= ~BIT(index);
+ }
+ regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return icss_iep_perout_enable(iep, &rq->perout, on);
+ case PTP_CLK_REQ_PPS:
+ return icss_iep_pps_enable(iep, on);
+ case PTP_CLK_REQ_EXTTS:
+ return icss_iep_extts_enable(iep, rq->extts.index, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info icss_iep_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ICSS IEP timer",
+ .max_adj = 10000000,
+ .adjfine = icss_iep_ptp_adjfine,
+ .adjtime = icss_iep_ptp_adjtime,
+ .gettimex64 = icss_iep_ptp_gettimeex,
+ .settime64 = icss_iep_ptp_settime,
+ .enable = icss_iep_ptp_enable,
+};
+
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
+{
+ struct platform_device *pdev;
+ struct device_node *iep_np;
+ struct icss_iep *iep;
+
+ iep_np = of_parse_phandle(np, "ti,iep", idx);
+ if (!iep_np || !of_device_is_available(iep_np))
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(iep_np);
+ of_node_put(iep_np);
+
+ if (!pdev)
+ /* probably IEP not yet probed */
+ return ERR_PTR(-EPROBE_DEFER);
+
+ iep = platform_get_drvdata(pdev);
+ if (!iep)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ device_lock(iep->dev);
+ if (iep->client_np) {
+ device_unlock(iep->dev);
+ dev_err(iep->dev, "IEP is already acquired by %s",
+ iep->client_np->name);
+ return ERR_PTR(-EBUSY);
+ }
+ iep->client_np = np;
+ device_unlock(iep->dev);
+ get_device(iep->dev);
+
+ return iep;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_idx);
+
+struct icss_iep *icss_iep_get(struct device_node *np)
+{
+ return icss_iep_get_idx(np, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get);
+
+void icss_iep_put(struct icss_iep *iep)
+{
+ device_lock(iep->dev);
+ iep->client_np = NULL;
+ device_unlock(iep->dev);
+ put_device(iep->dev);
+}
+EXPORT_SYMBOL_GPL(icss_iep_put);
+
+void icss_iep_init_fw(struct icss_iep *iep)
+{
+ /* start IEP for FW use in raw 64bit mode, no PTP support */
+ iep->clk_tick_time = iep->def_inc;
+ iep->cycle_time_ns = 0;
+ iep->ops = NULL;
+ iep->clockops_data = NULL;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_init_fw);
+
+void icss_iep_exit_fw(struct icss_iep *iep)
+{
+ icss_iep_disable(iep);
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
+
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns)
+{
+ int ret = 0;
+
+ iep->cycle_time_ns = cycle_time_ns;
+ iep->clk_tick_time = iep->def_inc;
+ iep->ops = clkops;
+ iep->clockops_data = clockops_data;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
+ !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
+ goto skip_perout;
+
+ if (iep->ops && iep->ops->perout_enable) {
+ iep->ptp_info.n_per_out = 1;
+ iep->ptp_info.pps = 1;
+ }
+
+ if (iep->ops && iep->ops->extts_enable)
+ iep->ptp_info.n_ext_ts = 2;
+
+skip_perout:
+ if (cycle_time_ns)
+ icss_iep_enable_shadow_mode(iep);
+ else
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, ktime_get_real_ns());
+
+ iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
+ if (IS_ERR(iep->ptp_clock)) {
+ ret = PTR_ERR(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icss_iep_init);
+
+int icss_iep_exit(struct icss_iep *iep)
+{
+ if (iep->ptp_clock) {
+ ptp_clock_unregister(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ }
+ icss_iep_disable(iep);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit);
+
+static int icss_iep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct icss_iep *iep;
+ struct clk *iep_clk;
+
+ iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
+ if (!iep)
+ return -ENOMEM;
+
+ iep->dev = dev;
+ iep->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iep->base))
+ return PTR_ERR(iep->base);
+
+ iep_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(iep_clk))
+ return PTR_ERR(iep_clk);
+
+ iep->refclk_freq = clk_get_rate(iep_clk);
+
+ iep->def_inc = NSEC_PER_SEC / iep->refclk_freq; /* ns per clock tick */
+ if (iep->def_inc > IEP_MAX_DEF_INC) {
+ dev_err(dev, "Failed to set def_inc %d. IEP_clock is too slow to be supported\n",
+ iep->def_inc);
+ return -EINVAL;
+ }
+
+ iep->plat_data = device_get_match_data(dev);
+ if (!iep->plat_data)
+ return -EINVAL;
+
+ iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
+ if (IS_ERR(iep->map)) {
+ dev_err(dev, "Failed to create regmap for IEP %ld\n",
+ PTR_ERR(iep->map));
+ return PTR_ERR(iep->map);
+ }
+
+ iep->ptp_info = icss_iep_ptp_info;
+ mutex_init(&iep->ptp_clk_mutex);
+ spin_lock_init(&iep->irq_lock);
+ dev_set_drvdata(dev, iep);
+ icss_iep_disable(iep);
+
+ return 0;
+}
+
+static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static int icss_iep_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct icss_iep *iep = context;
+
+ writel(val, iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static int icss_iep_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct icss_iep *iep = context;
+
+ *val = readl(iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static struct regmap_config am654_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am654_icss_iep_valid_reg,
+ .readable_reg = am654_icss_iep_valid_reg,
+ .fast_io = 1,
+};
+
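+/* The regmap is indexed by the ICSS_IEP_*_REG enum above; icss_iep_regmap_read()
+ * and icss_iep_regmap_write() translate an index into a SoC-specific MMIO offset
+ * through reg_offs[], e.g. on AM654 a regmap_write(iep->map, ICSS_IEP_CMP1_REG0,
+ * val) lands at iep->base + 0x80.
+ */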
+static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
+ ICSS_IEP_SHADOW_MODE_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static const struct of_device_id icss_iep_of_match[] = {
+ {
+ .compatible = "ti,am654-icss-iep",
+ .data = &am654_icss_iep_plat_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, icss_iep_of_match);
+
+static struct platform_driver icss_iep_driver = {
+ .driver = {
+ .name = "icss-iep",
+ .of_match_table = icss_iep_of_match,
+ },
+ .probe = icss_iep_probe,
+};
+module_platform_driver(icss_iep_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI ICSS IEP driver");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h
new file mode 100644
index 000000000000..803a4b714893
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSS_IEP_H
+#define __NET_TI_ICSS_IEP_H
+
+#include <linux/mutex.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/regmap.h>
+
+struct icss_iep;
+extern const struct icss_iep_clockops prueth_iep_clockops;
+
+/* Firmware specific clock operations */
+struct icss_iep_clockops {
+ void (*settime)(void *clockops_data, u64 ns);
+ void (*adjtime)(void *clockops_data, s64 delta);
+ u64 (*gettime)(void *clockops_data, struct ptp_system_timestamp *sts);
+ int (*perout_enable)(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp);
+ int (*extts_enable)(void *clockops_data, u32 index, int on);
+};
+
+struct icss_iep *icss_iep_get(struct device_node *np);
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx);
+void icss_iep_put(struct icss_iep *iep);
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns);
+int icss_iep_exit(struct icss_iep *iep);
+int icss_iep_get_count_low(struct icss_iep *iep);
+int icss_iep_get_count_hi(struct icss_iep *iep);
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep);
+void icss_iep_init_fw(struct icss_iep *iep);
+void icss_iep_exit_fw(struct icss_iep *iep);
+
+#endif /* __NET_TI_ICSS_IEP_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
new file mode 100644
index 000000000000..6df53ab17fbc
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+
+#include "icssg_prueth.h"
+
+#define ICSSG_NUM_CLASSIFIERS 16
+#define ICSSG_NUM_FT1_SLOTS 8
+#define ICSSG_NUM_FT3_SLOTS 16
+
+#define ICSSG_NUM_CLASSIFIERS_IN_USE 5
+
+/* Filter 1 - FT1 */
+#define FT1_NUM_SLOTS 8
+#define FT1_SLOT_SIZE 0x10 /* bytes */
+
+/* offsets from FT1 slot base i.e. slot 1 start */
+#define FT1_DA0 0x0
+#define FT1_DA1 0x4
+#define FT1_DA0_MASK 0x8
+#define FT1_DA1_MASK 0xc
+
+#define FT1_N_REG(slice, n, reg) \
+ (offs[slice].ft1_slot_base + FT1_SLOT_SIZE * (n) + (reg))
+
+#define FT1_LEN_MASK GENMASK(19, 16)
+#define FT1_LEN_SHIFT 16
+#define FT1_LEN(len) (((len) << FT1_LEN_SHIFT) & FT1_LEN_MASK)
+#define FT1_START_MASK GENMASK(14, 0)
+#define FT1_START(start) ((start) & FT1_START_MASK)
+#define FT1_MATCH_SLOT(n) (GENMASK(23, 16) & (BIT(n) << 16))
+
+/* FT1 config type */
+enum ft1_cfg_type {
+ FT1_CFG_TYPE_DISABLED = 0,
+ FT1_CFG_TYPE_EQ,
+ FT1_CFG_TYPE_GT,
+ FT1_CFG_TYPE_LT,
+};
+
+#define FT1_CFG_SHIFT(n) (2 * (n))
+#define FT1_CFG_MASK(n) (0x3 << FT1_CFG_SHIFT((n)))
+
+/* Filter 3 - FT3 */
+#define FT3_NUM_SLOTS 16
+#define FT3_SLOT_SIZE 0x20 /* bytes */
+
+/* offsets from FT3 slot n's base */
+#define FT3_START 0
+#define FT3_START_AUTO 0x4
+#define FT3_START_OFFSET 0x8
+#define FT3_JUMP_OFFSET 0xc
+#define FT3_LEN 0x10
+#define FT3_CFG 0x14
+#define FT3_T 0x18
+#define FT3_T_MASK 0x1c
+
+#define FT3_N_REG(slice, n, reg) \
+ (offs[slice].ft3_slot_base + FT3_SLOT_SIZE * (n) + (reg))
+
+/* offsets from rx_class n's base */
+#define RX_CLASS_AND_EN 0
+#define RX_CLASS_OR_EN 0x4
+#define RX_CLASS_NUM_SLOTS 16
+#define RX_CLASS_EN_SIZE 0x8 /* bytes */
+
+#define RX_CLASS_N_REG(slice, n, reg) \
+ (offs[slice].rx_class_base + RX_CLASS_EN_SIZE * (n) + (reg))
+
+/* RX Class Gates */
+#define RX_CLASS_GATES_SIZE 0x4 /* bytes */
+
+#define RX_CLASS_GATES_N_REG(slice, n) \
+ (offs[slice].rx_class_gates_base + RX_CLASS_GATES_SIZE * (n))
+
+#define RX_CLASS_GATES_ALLOW_MASK BIT(6)
+#define RX_CLASS_GATES_RAW_MASK BIT(5)
+#define RX_CLASS_GATES_PHASE_MASK BIT(4)
+
+/* RX Class traffic data matching bits */
+#define RX_CLASS_FT_UC BIT(31)
+#define RX_CLASS_FT_MC BIT(30)
+#define RX_CLASS_FT_BC BIT(29)
+#define RX_CLASS_FT_FW BIT(28)
+#define RX_CLASS_FT_RCV BIT(27)
+#define RX_CLASS_FT_VLAN BIT(26)
+#define RX_CLASS_FT_DA_P BIT(25)
+#define RX_CLASS_FT_DA_I BIT(24)
+#define RX_CLASS_FT_FT1_MATCH_MASK GENMASK(23, 16)
+#define RX_CLASS_FT_FT1_MATCH_SHIFT 16
+#define RX_CLASS_FT_FT3_MATCH_MASK GENMASK(15, 0)
+#define RX_CLASS_FT_FT3_MATCH_SHIFT 0
+
+#define RX_CLASS_FT_FT1_MATCH(slot) \
+ ((BIT(slot) << RX_CLASS_FT_FT1_MATCH_SHIFT) & \
+ RX_CLASS_FT_FT1_MATCH_MASK)
+
+/* RX class type */
+enum rx_class_sel_type {
+ RX_CLASS_SEL_TYPE_OR = 0,
+ RX_CLASS_SEL_TYPE_AND = 1,
+ RX_CLASS_SEL_TYPE_OR_AND_AND = 2,
+ RX_CLASS_SEL_TYPE_OR_OR_AND = 3,
+};
+
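+/* Each classifier has an AND-enable and an OR-enable bitmask built from the
+ * RX_CLASS_FT_* match conditions above; the per-classifier select type
+ * programmed into rx_class_cfg1 chooses how the two results are combined.
+ */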
+#define RX_CLASS_SEL_SHIFT(n) (2 * (n))
+#define RX_CLASS_SEL_MASK(n) (0x3 << RX_CLASS_SEL_SHIFT((n)))
+
+#define ICSSG_CFG_OFFSET 0
+#define MAC_INTERFACE_0 0x18
+#define MAC_INTERFACE_1 0x1c
+
+#define ICSSG_CFG_RX_L2_G_EN BIT(2)
+
+/* These are register offsets per PRU */
+struct miig_rt_offsets {
+ u32 mac0;
+ u32 mac1;
+ u32 ft1_start_len;
+ u32 ft1_cfg;
+ u32 ft1_slot_base;
+ u32 ft3_slot_base;
+ u32 ft3_p_base;
+ u32 ft_rx_ptr;
+ u32 rx_class_base;
+ u32 rx_class_cfg1;
+ u32 rx_class_cfg2;
+ u32 rx_class_gates_base;
+ u32 rx_green;
+ u32 rx_rate_cfg_base;
+ u32 rx_rate_src_sel0;
+ u32 rx_rate_src_sel1;
+ u32 tx_rate_cfg_base;
+ u32 stat_base;
+ u32 tx_hsr_tag;
+ u32 tx_hsr_seq;
+ u32 tx_vlan_type;
+ u32 tx_vlan_ins;
+};
+
+/* These are the offset values for miig_rt_offsets registers */
+static const struct miig_rt_offsets offs[] = {
+ /* PRU0 */
+ {
+ 0x8,
+ 0xc,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x108,
+ 0x308,
+ 0x408,
+ 0x40c,
+ 0x48c,
+ 0x490,
+ 0x494,
+ 0x4d4,
+ 0x4e4,
+ 0x504,
+ 0x508,
+ 0x50c,
+ 0x54c,
+ 0x63c,
+ 0x640,
+ 0x644,
+ 0x648,
+ },
+ /* PRU1 */
+ {
+ 0x10,
+ 0x14,
+ 0x64c,
+ 0x650,
+ 0x654,
+ 0x6d4,
+ 0x8d4,
+ 0x9d4,
+ 0x9d8,
+ 0xa58,
+ 0xa5c,
+ 0xa60,
+ 0xaa0,
+ 0xab0,
+ 0xad0,
+ 0xad4,
+ 0xad8,
+ 0xb18,
+ 0xc08,
+ 0xc0c,
+ 0xc10,
+ 0xc14,
+ },
+};
+
+static void rx_class_ft1_set_start_len(struct regmap *miig_rt, int slice,
+ u16 start, u8 len)
+{
+ u32 offset, val;
+
+ offset = offs[slice].ft1_start_len;
+ val = FT1_LEN(len) | FT1_START(start);
+ regmap_write(miig_rt, offset, val);
+}
+
+static void rx_class_ft1_set_da(struct regmap *miig_rt, int slice,
+ int n, const u8 *addr)
+{
+ u32 offset;
+
+ offset = FT1_N_REG(slice, n, FT1_DA0);
+ regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
+ addr[2] << 16 | addr[3] << 24));
+ offset = FT1_N_REG(slice, n, FT1_DA1);
+ regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
+}
+
+static void rx_class_ft1_set_da_mask(struct regmap *miig_rt, int slice,
+ int n, const u8 *addr)
+{
+ u32 offset;
+
+ offset = FT1_N_REG(slice, n, FT1_DA0_MASK);
+ regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
+ addr[2] << 16 | addr[3] << 24));
+ offset = FT1_N_REG(slice, n, FT1_DA1_MASK);
+ regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
+}
+
+static void rx_class_ft1_cfg_set_type(struct regmap *miig_rt, int slice, int n,
+ enum ft1_cfg_type type)
+{
+ u32 offset;
+
+ offset = offs[slice].ft1_cfg;
+ regmap_update_bits(miig_rt, offset, FT1_CFG_MASK(n),
+ type << FT1_CFG_SHIFT(n));
+}
+
+static void rx_class_sel_set_type(struct regmap *miig_rt, int slice, int n,
+ enum rx_class_sel_type type)
+{
+ u32 offset;
+
+ offset = offs[slice].rx_class_cfg1;
+ regmap_update_bits(miig_rt, offset, RX_CLASS_SEL_MASK(n),
+ type << RX_CLASS_SEL_SHIFT(n));
+}
+
+static void rx_class_set_and(struct regmap *miig_rt, int slice, int n,
+ u32 data)
+{
+ u32 offset;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_AND_EN);
+ regmap_write(miig_rt, offset, data);
+}
+
+static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
+ u32 data)
+{
+ u32 offset;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
+ regmap_write(miig_rt, offset, data);
+}
+
+void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
+{
+ regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 |
+ mac[2] << 16 | mac[3] << 24));
+ regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8));
+}
+
+void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
+{
+ regmap_write(miig_rt, offs[slice].mac0, (u32)(mac[0] | mac[1] << 8 |
+ mac[2] << 16 | mac[3] << 24));
+ regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8));
+}
+
+/* disable all RX traffic */
+void icssg_class_disable(struct regmap *miig_rt, int slice)
+{
+ u32 data, offset;
+ int n;
+
+ /* Enable RX_L2_G */
+ regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_RX_L2_G_EN,
+ ICSSG_CFG_RX_L2_G_EN);
+
+ for (n = 0; n < ICSSG_NUM_CLASSIFIERS; n++) {
+ /* AND_EN = 0 */
+ rx_class_set_and(miig_rt, slice, n, 0);
+ /* OR_EN = 0 */
+ rx_class_set_or(miig_rt, slice, n, 0);
+
+ /* set CFG1 to OR */
+ rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR);
+
+ /* configure gate */
+ offset = RX_CLASS_GATES_N_REG(slice, n);
+ regmap_read(miig_rt, offset, &data);
+ /* clear class_raw so we go through filters */
+ data &= ~RX_CLASS_GATES_RAW_MASK;
+ /* set allow and phase mask */
+ data |= RX_CLASS_GATES_ALLOW_MASK | RX_CLASS_GATES_PHASE_MASK;
+ regmap_write(miig_rt, offset, data);
+ }
+
+ /* FT1 Disabled */
+ for (n = 0; n < ICSSG_NUM_FT1_SLOTS; n++) {
+ const u8 addr[] = { 0, 0, 0, 0, 0, 0, };
+
+ rx_class_ft1_cfg_set_type(miig_rt, slice, n,
+ FT1_CFG_TYPE_DISABLED);
+ rx_class_ft1_set_da(miig_rt, slice, n, addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, n, addr);
+ }
+
+ /* clear CFG2 */
+ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
+}
+
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti)
+{
+ u32 data;
+
+ /* defaults */
+ icssg_class_disable(miig_rt, slice);
+
+ /* Setup Classifier */
+ /* match on Broadcast or MAC_PRU address */
+ data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
+
+ /* multicast */
+ if (allmulti)
+ data |= RX_CLASS_FT_MC;
+
+ rx_class_set_or(miig_rt, slice, 0, data);
+
+ /* set CFG1 for OR_OR_AND for classifier */
+ rx_class_sel_set_type(miig_rt, slice, 0, RX_CLASS_SEL_TYPE_OR_OR_AND);
+
+ /* clear CFG2 */
+ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
+}
+
+/* Program the port MAC address into FT1 slot 0; required for the SAV (source address verification) check */
+void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
+{
+ const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
+
+ rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
+ rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
+}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
new file mode 100644
index 000000000000..933b84666574
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ICSSG Ethernet driver
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/iopoll.h>
+#include <linux/regmap.h>
+#include <uapi/linux/if_ether.h>
+#include "icssg_config.h"
+#include "icssg_prueth.h"
+#include "icssg_switch_map.h"
+#include "icssg_mii_rt.h"
+
+/* TX IPG values to be set for 100M link speed. These values are
+ * in ocp_clk cycles, so they need to change if ocp_clk is changed
+ * for a specific h/w design.
+ */
+
+/* IPG is in core_clk cycles */
+#define MII_RT_TX_IPG_100M 0x17
+#define MII_RT_TX_IPG_1G 0xb
+
+#define ICSSG_QUEUES_MAX 64
+#define ICSSG_QUEUE_OFFSET 0xd00
+#define ICSSG_QUEUE_PEEK_OFFSET 0xe00
+#define ICSSG_QUEUE_CNT_OFFSET 0xe40
+#define ICSSG_QUEUE_RESET_OFFSET 0xf40
+
+#define ICSSG_NUM_TX_QUEUES 8
+
+#define RECYCLE_Q_SLICE0 16
+#define RECYCLE_Q_SLICE1 17
+
+#define ICSSG_NUM_OTHER_QUEUES 5 /* port, host and special queues */
+
+#define PORT_HI_Q_SLICE0 32
+#define PORT_LO_Q_SLICE0 33
+#define HOST_HI_Q_SLICE0 34
+#define HOST_LO_Q_SLICE0 35
+#define HOST_SPL_Q_SLICE0 40 /* Special Queue */
+
+#define PORT_HI_Q_SLICE1 36
+#define PORT_LO_Q_SLICE1 37
+#define HOST_HI_Q_SLICE1 38
+#define HOST_LO_Q_SLICE1 39
+#define HOST_SPL_Q_SLICE1 41 /* Special Queue */
+
+#define MII_RXCFG_DEFAULT (PRUSS_MII_RT_RXCFG_RX_ENABLE | \
+ PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \
+ PRUSS_MII_RT_RXCFG_RX_L2_EN | \
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS)
+
+#define MII_TXCFG_DEFAULT (PRUSS_MII_RT_TXCFG_TX_ENABLE | \
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \
+ PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \
+ PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN)
+
+#define ICSSG_CFG_DEFAULT (ICSSG_CFG_TX_L1_EN | \
+ ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \
+ ICSSG_CFG_TX_PRU_EN | \
+ ICSSG_CFG_SGMII_MODE)
+
+#define FDB_GEN_CFG1 0x60
+#define SMEM_VLAN_OFFSET 8
+#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8)
+
+#define FDB_GEN_CFG2 0x64
+#define FDB_VLAN_EN BIT(6)
+#define FDB_HOST_EN BIT(2)
+#define FDB_PRU1_EN BIT(1)
+#define FDB_PRU0_EN BIT(0)
+#define FDB_EN_ALL (FDB_PRU0_EN | FDB_PRU1_EN | \
+ FDB_HOST_EN | FDB_VLAN_EN)
+
+/**
+ * struct map - ICSSG Queue Map
+ * @queue: Queue number
+ * @pd_addr_start: Packet descriptor queue reserved memory
+ * @flags: Flags
+ * @special: Indicates whether this queue is a special queue or not
+ */
+struct map {
+ int queue;
+ u32 pd_addr_start;
+ u32 flags;
+ bool special;
+};
+
+/* Hardware queue map for ICSSG */
+static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
+ {
+ { PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 },
+ { PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 },
+ { HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 },
+ { HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 },
+ { HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 },
+ },
+ {
+ { PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 },
+ { PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 },
+ { HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 },
+ { HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 },
+ { HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 },
+ },
+};
+
+static void icssg_config_mii_init(struct prueth_emac *emac)
+{
+ u32 rxcfg, txcfg, rxcfg_reg, txcfg_reg, pcnt_reg;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct regmap *mii_rt;
+
+ mii_rt = prueth->mii_rt;
+
+ rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 :
+ PRUSS_MII_RT_RXCFG1;
+ txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
+ PRUSS_MII_RT_TXCFG1;
+ pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
+ PRUSS_MII_RT_RX_PCNT1;
+
+ rxcfg = MII_RXCFG_DEFAULT;
+ txcfg = MII_TXCFG_DEFAULT;
+
+ if (slice == ICSS_MII1)
+ rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
+
+ /* In MII mode the TX lines are swapped inside the ICSSG, so the
+ * TX_MUX_SEL configuration also needs to be swapped compared to
+ * RGMII mode.
+ */
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, pcnt_reg, 0x1);
+}
+
+static void icssg_miig_queues_init(struct prueth *prueth, int slice)
+{
+ struct regmap *miig_rt = prueth->miig_rt;
+ void __iomem *smem = prueth->shram.va;
+ u8 pd[ICSSG_SPECIAL_PD_SIZE];
+ int queue = 0, i, j;
+ u32 *pdword;
+
+ /* reset hwqueues */
+ if (slice)
+ queue = ICSSG_NUM_TX_QUEUES;
+
+ for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) {
+ regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
+ queue++;
+ }
+
+ queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0;
+ regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
+
+ for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) {
+ regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET,
+ hwq_map[slice][i].queue);
+ }
+
+ /* initialize packet descriptors in SMEM */
+ /* push packet descriptors to hwqueues */
+
+ pdword = (u32 *)pd;
+ for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) {
+ const struct map *mp;
+ int pd_size, num_pds;
+ u32 pdaddr;
+
+ mp = &hwq_map[slice][j];
+ if (mp->special) {
+ pd_size = ICSSG_SPECIAL_PD_SIZE;
+ num_pds = ICSSG_NUM_SPECIAL_PDS;
+ } else {
+ pd_size = ICSSG_NORMAL_PD_SIZE;
+ num_pds = ICSSG_NUM_NORMAL_PDS;
+ }
+
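+ /* seed each descriptor: zero it, set this queue's flags in word 0,
+ * copy it to SMEM and push its SMEM address onto the hardware queue
+ */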
+ for (i = 0; i < num_pds; i++) {
+ memset(pd, 0, pd_size);
+
+ pdword[0] &= ICSSG_FLAG_MASK;
+ pdword[0] |= mp->flags;
+ pdaddr = mp->pd_addr_start + i * pd_size;
+
+ memcpy_toio(smem + pdaddr, pd, pd_size);
+ queue = mp->queue;
+ regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue,
+ pdaddr);
+ }
+ }
+}
+
+void icssg_config_ipg(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+
+ switch (emac->speed) {
+ case SPEED_1000:
+ icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_1G);
+ break;
+ case SPEED_100:
+ icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ break;
+ case SPEED_10:
+ /* IPG for 10M is same as 100M */
+ icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ break;
+ default:
+ /* Other link speeds are not supported */
+ netdev_err(emac->ndev, "Unsupported link speed\n");
+ return;
+ }
+}
+
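+/* R30 command interface: the host writes one command word per core into the
+ * MGR_R30_CMD area in DRAM, and the firmware writes each word back to
+ * EMAC_NONE once the command has been consumed (see emac_r30_is_done()).
+ */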
+static void emac_r30_cmd_init(struct prueth_emac *emac)
+{
+ struct icssg_r30_cmd __iomem *p;
+ int i;
+
+ p = emac->dram.va + MGR_R30_CMD_OFFSET;
+
+ for (i = 0; i < 4; i++)
+ writel(EMAC_NONE, &p->cmd[i]);
+}
+
+static int emac_r30_is_done(struct prueth_emac *emac)
+{
+ const struct icssg_r30_cmd __iomem *p;
+ u32 cmd;
+ int i;
+
+ p = emac->dram.va + MGR_R30_CMD_OFFSET;
+
+ for (i = 0; i < 4; i++) {
+ cmd = readl(&p->cmd[i]);
+ if (cmd != EMAC_NONE)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+{
+ struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
+ struct icssg_rxq_ctx __iomem *rxq_ctx;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ u32 addr;
+ int i;
+
+ /* Layout to have 64KB aligned buffer pool
+ * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
+ */
+
+ addr = lower_32_bits(prueth->msmcram.pa);
+ if (slice)
+ addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+
+ if (addr % SZ_64K) {
+ dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+ return -EINVAL;
+ }
+
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ /* workaround for f/w bug. bpool 0 needs to be initialized */
+ writel(addr, &bpool_cfg[0].addr);
+ writel(0, &bpool_cfg[0].len);
+
+ for (i = PRUETH_EMAC_BUF_POOL_START;
+ i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
+ i++) {
+ writel(addr, &bpool_cfg[i].addr);
+ writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ }
+
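+ /* skip past the other slice's region to reach this slice's RX
+ * context buffers (layout: |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|)
+ */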
+ if (!slice)
+ addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ else
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+
+ /* Pre-emptible RX buffer queue */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ /* Express RX buffer queue */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ return 0;
+}
+
+static void icssg_init_emac_mode(struct prueth *prueth)
+{
+ /* When the device is configured as a bridge and is being brought
+ * back to EMAC mode, the host MAC address has to be set to 0.
+ */
+ u8 mac[ETH_ALEN] = { 0 };
+
+ if (prueth->emacs_initialized)
+ return;
+
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1,
+ SMEM_VLAN_OFFSET_MASK, 0);
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0);
+ /* Clear host MAC address */
+ icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
+}
+
+int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
+{
+ void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
+ struct icssg_flow_cfg __iomem *flow_cfg;
+ int ret;
+
+ icssg_init_emac_mode(prueth);
+
+ memset_io(config, 0, TAS_GATE_MASK_LIST0);
+ icssg_miig_queues_init(prueth, slice);
+
+ emac->speed = SPEED_1000;
+ emac->duplex = DUPLEX_FULL;
+ if (!phy_interface_mode_is_rgmii(emac->phy_if)) {
+ emac->speed = SPEED_100;
+ emac->duplex = DUPLEX_FULL;
+ }
+ regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
+ ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
+ icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
+ icssg_config_mii_init(emac);
+ icssg_config_ipg(emac);
+ icssg_update_rgmii_cfg(prueth->miig_rt, emac);
+
+ /* set GPI mode */
+ pruss_cfg_gpimode(prueth->pruss, prueth->pru_id[slice],
+ PRUSS_GPI_MODE_MII);
+
+ /* enable XFR shift for PRU and RTU */
+ pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_PRU, true);
+ pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_RTU, true);
+
+ /* set C28 to 0x100 */
+ pru_rproc_set_ctable(prueth->pru[slice], PRU_C28, 0x100 << 8);
+ pru_rproc_set_ctable(prueth->rtu[slice], PRU_C28, 0x100 << 8);
+ pru_rproc_set_ctable(prueth->txpru[slice], PRU_C28, 0x100 << 8);
+
+ flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
+ writew(0, &flow_cfg->mgm_base_flow);
+ writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
+ writeb(0, config + QUEUE_NUM_UNTAGGED);
+
+ ret = prueth_emac_buffer_setup(emac);
+ if (ret)
+ return ret;
+
+ emac_r30_cmd_init(emac);
+
+ return 0;
+}
+
+/* Bitmask for ICSSG r30 commands */
+static const struct icssg_r30_cmd emac_r32_bitmask[] = {
+ {{0xffff0004, 0xffff0100, 0xffff0100, EMAC_NONE}}, /* EMAC_PORT_DISABLE */
+ {{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}}, /* EMAC_PORT_BLOCK */
+ {{0xffbb0000, 0xfcff0000, 0xdcff0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */
+ {{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}}, /* EMAC_PORT_FORWARD_WO_LEARNING */
+ {{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT ALL */
+ {{0xfffe0002, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT TAGGED */
+ {{0xfffc0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT UNTAGGED and PRIO */
+ {{EMAC_NONE, 0xffff0020, EMAC_NONE, EMAC_NONE}}, /* TAS Trigger List change */
+ {{EMAC_NONE, 0xdfff1000, EMAC_NONE, EMAC_NONE}}, /* TAS set state ENABLE*/
+ {{EMAC_NONE, 0xefff2000, EMAC_NONE, EMAC_NONE}}, /* TAS set state RESET*/
+ {{EMAC_NONE, 0xcfff0000, EMAC_NONE, EMAC_NONE}}, /* TAS set state DISABLE*/
+ {{EMAC_NONE, EMAC_NONE, 0xffff0400, EMAC_NONE}}, /* UC flooding ENABLE*/
+ {{EMAC_NONE, EMAC_NONE, 0xfbff0000, EMAC_NONE}}, /* UC flooding DISABLE*/
+ {{EMAC_NONE, EMAC_NONE, 0xffff0800, EMAC_NONE}}, /* MC flooding ENABLE*/
+ {{EMAC_NONE, EMAC_NONE, 0xf7ff0000, EMAC_NONE}}, /* MC flooding DISABLE*/
+ {{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE*/
+ {{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx DISABLE*/
+ {{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}}, /* VLAN AWARE*/
+ {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNAWARE */
+};
+
+int emac_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd cmd)
+{
+ struct icssg_r30_cmd __iomem *p;
+ int ret = -ETIMEDOUT;
+ int done = 0;
+ int i;
+
+ p = emac->dram.va + MGR_R30_CMD_OFFSET;
+
+ if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) {
+ netdev_err(emac->ndev, "invalid port command\n");
+ return -EINVAL;
+ }
+
+ /* only one command at a time is allowed to the firmware */
+ mutex_lock(&emac->cmd_lock);
+
+ for (i = 0; i < 4; i++)
+ writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]);
+
+ /* wait for done */
+ ret = read_poll_timeout(emac_r30_is_done, done, done == 1,
+ 1000, 10000, false, emac);
+
+ if (ret == -ETIMEDOUT)
+ netdev_err(emac->ndev, "timeout waiting for command done\n");
+
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+}
+
+void icssg_config_set_speed(struct prueth_emac *emac)
+{
+ u8 fw_speed;
+
+ switch (emac->speed) {
+ case SPEED_1000:
+ fw_speed = FW_LINK_SPEED_1G;
+ break;
+ case SPEED_100:
+ fw_speed = FW_LINK_SPEED_100M;
+ break;
+ case SPEED_10:
+ fw_speed = FW_LINK_SPEED_10M;
+ break;
+ default:
+ /* Other link speeds are not supported */
+ netdev_err(emac->ndev, "Unsupported link speed\n");
+ return;
+ }
+
+ writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
+}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
new file mode 100644
index 000000000000..43eb0922172a
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSSG_CONFIG_H
+#define __NET_TI_ICSSG_CONFIG_H
+
+struct icssg_buffer_pool_cfg {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+struct icssg_flow_cfg {
+ __le16 rx_base_flow;
+ __le16 mgm_base_flow;
+} __packed;
+
+#define PRUETH_PKT_TYPE_CMD 0x10
+#define PRUETH_NAV_PS_DATA_SIZE 16 /* Protocol specific data size */
+#define PRUETH_NAV_SW_DATA_SIZE 16 /* SW related data size */
+#define PRUETH_MAX_TX_DESC 512
+#define PRUETH_MAX_RX_DESC 512
+#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */
+#define PRUETH_RX_FLOW_DATA 0
+
+#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K
+#define PRUETH_EMAC_POOLS_PER_SLICE 24
+#define PRUETH_EMAC_BUF_POOL_START 8
+#define PRUETH_NUM_BUF_POOLS 8
+#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */
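+/* total MSMC RAM: two slices, each with PRUETH_NUM_BUF_POOLS buffer pools
+ * plus two RX context buffers (express and pre-emptible)
+ */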
+#define MSMC_RAM_SIZE \
+ (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
+ PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
+
+struct icssg_rxq_ctx {
+ __le32 start[3];
+ __le32 end;
+} __packed;
+
+/* Load time Firmware Configuration */
+
+#define ICSSG_FW_MGMT_CMD_HEADER 0x81
+#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
+#define ICSSG_FW_MGMT_CMD_TYPE 0x04
+#define ICSSG_FW_MGMT_PKT 0x80000000
+
+struct icssg_r30_cmd {
+ u32 cmd[4];
+} __packed;
+
+enum icssg_port_state_cmd {
+ ICSSG_EMAC_PORT_DISABLE = 0,
+ ICSSG_EMAC_PORT_BLOCK,
+ ICSSG_EMAC_PORT_FORWARD,
+ ICSSG_EMAC_PORT_FORWARD_WO_LEARNING,
+ ICSSG_EMAC_PORT_ACCEPT_ALL,
+ ICSSG_EMAC_PORT_ACCEPT_TAGGED,
+ ICSSG_EMAC_PORT_ACCEPT_UNTAGGED_N_PRIO,
+ ICSSG_EMAC_PORT_TAS_TRIGGER,
+ ICSSG_EMAC_PORT_TAS_ENABLE,
+ ICSSG_EMAC_PORT_TAS_RESET,
+ ICSSG_EMAC_PORT_TAS_DISABLE,
+ ICSSG_EMAC_PORT_UC_FLOODING_ENABLE,
+ ICSSG_EMAC_PORT_UC_FLOODING_DISABLE,
+ ICSSG_EMAC_PORT_MC_FLOODING_ENABLE,
+ ICSSG_EMAC_PORT_MC_FLOODING_DISABLE,
+ ICSSG_EMAC_PORT_PREMPT_TX_ENABLE,
+ ICSSG_EMAC_PORT_PREMPT_TX_DISABLE,
+ ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE,
+ ICSSG_EMAC_PORT_VLAN_AWARE_DISABLE,
+ ICSSG_EMAC_PORT_MAX_COMMANDS
+};
+
+#define EMAC_NONE 0xffff0000
+#define EMAC_PRU0_P_DI 0xffff0004
+#define EMAC_PRU1_P_DI 0xffff0040
+#define EMAC_TX_P_DI 0xffff0100
+
+#define EMAC_PRU0_P_EN 0xfffb0000
+#define EMAC_PRU1_P_EN 0xffbf0000
+#define EMAC_TX_P_EN 0xfeff0000
+
+#define EMAC_P_BLOCK 0xffff0040
+#define EMAC_TX_P_BLOCK 0xffff0200
+#define EMAC_P_UNBLOCK 0xffbf0000
+#define EMAC_TX_P_UNBLOCK 0xfdff0000
+#define EMAC_LEAN_EN 0xfff70000
+#define EMAC_LEAN_DI 0xffff0008
+
+#define EMAC_ACCEPT_ALL 0xffff0001
+#define EMAC_ACCEPT_TAG 0xfffe0002
+#define EMAC_ACCEPT_PRIOR 0xfffc0000
+
+/* Config area lies in DRAM */
+#define ICSSG_CONFIG_OFFSET 0x0
+
+/* Config area lies in shared RAM */
+#define ICSSG_CONFIG_OFFSET_SLICE0 0
+#define ICSSG_CONFIG_OFFSET_SLICE1 0x8000
+
+#define ICSSG_NUM_NORMAL_PDS 64
+#define ICSSG_NUM_SPECIAL_PDS 16
+
+#define ICSSG_NORMAL_PD_SIZE 8
+#define ICSSG_SPECIAL_PD_SIZE 20
+
+#define ICSSG_FLAG_MASK 0xff00ffff
+
+struct icssg_setclock_desc {
+ u8 request;
+ u8 restore;
+ u8 acknowledgment;
+ u8 cmp_status;
+ u32 margin;
+ u32 cyclecounter0_set;
+ u32 cyclecounter1_set;
+ u32 iepcount_set;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 CMP0_current;
+ u32 iepcount_current;
+ u32 difference;
+ u32 cyclecounter0_new;
+ u32 cyclecounter1_new;
+ u32 CMP0_new;
+} __packed;
+
+#define ICSSG_CMD_POP_SLICE0 56
+#define ICSSG_CMD_POP_SLICE1 60
+
+#define ICSSG_CMD_PUSH_SLICE0 57
+#define ICSSG_CMD_PUSH_SLICE1 61
+
+#define ICSSG_RSP_POP_SLICE0 58
+#define ICSSG_RSP_POP_SLICE1 62
+
+#define ICSSG_RSP_PUSH_SLICE0 56
+#define ICSSG_RSP_PUSH_SLICE1 60
+
+#define ICSSG_TS_POP_SLICE0 59
+#define ICSSG_TS_POP_SLICE1 63
+
+#define ICSSG_TS_PUSH_SLICE0 40
+#define ICSSG_TS_PUSH_SLICE1 41
+
+/* FDB FID_C2 flag definitions */
+/* Indicates host port membership.*/
+#define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0)
+/* Indicates that MAC ID is connected to physical port 1 */
+#define ICSSG_FDB_ENTRY_P1_MEMBERSHIP BIT(1)
+/* Indicates that MAC ID is connected to physical port 2 */
+#define ICSSG_FDB_ENTRY_P2_MEMBERSHIP BIT(2)
+/* Ageable bit is set for learned entries and cleared for static entries */
+#define ICSSG_FDB_ENTRY_AGEABLE BIT(3)
+/* If set for DA then packet is determined to be a special packet */
+#define ICSSG_FDB_ENTRY_BLOCK BIT(4)
+/* If set for DA then the SA from the packet is not learned */
+#define ICSSG_FDB_ENTRY_SECURE BIT(5)
+/* If set, it means packet has been seen recently with source address + FID
+ * matching MAC address/FID of entry
+ */
+#define ICSSG_FDB_ENTRY_TOUCHED BIT(6)
+/* Set if entry is valid */
+#define ICSSG_FDB_ENTRY_VALID BIT(7)
+
+/**
+ * struct prueth_vlan_tbl - VLAN table entries struct in ICSSG SMEM
+ * @fid_c1: membership and forwarding rule flags for this entry. See
+ * the defines above for bit definitions
+ * @fid: FDB index for this VID (there is 1-1 mapping b/w VID and FID)
+ */
+struct prueth_vlan_tbl {
+ u8 fid_c1;
+ u8 fid;
+} __packed;
+
+/**
+ * struct prueth_fdb_slot - Result of FDB slot lookup
+ * @mac: MAC address
+ * @fid: fid to be associated with MAC
+ * @fid_c2: FID_C2 entry for this MAC
+ */
+struct prueth_fdb_slot {
+ u8 mac[ETH_ALEN];
+ u8 fid;
+ u8 fid_c2;
+} __packed;
+
+enum icssg_ietfpe_verify_states {
+ ICSSG_IETFPE_STATE_UNKNOWN = 0,
+ ICSSG_IETFPE_STATE_INITIAL,
+ ICSSG_IETFPE_STATE_VERIFYING,
+ ICSSG_IETFPE_STATE_SUCCEEDED,
+ ICSSG_IETFPE_STATE_FAILED,
+ ICSSG_IETFPE_STATE_DISABLED
+};
+#endif /* __NET_TI_ICSSG_CONFIG_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
new file mode 100644
index 000000000000..a27ec1dcc8d5
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include "icssg_prueth.h"
+#include "icssg_stats.h"
+
+static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ strscpy(info->driver, dev_driver_string(prueth->dev),
+ sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(prueth->dev), sizeof(info->bus_info));
+}
+
+static u32 emac_get_msglevel(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ return emac->msg_enable;
+}
+
+static void emac_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ emac->msg_enable = value;
+}
+
+static int emac_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ return phy_ethtool_get_link_ksettings(ndev, ecmd);
+}
+
+static int emac_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ return phy_ethtool_set_link_ksettings(ndev, ecmd);
+}
+
+static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ if (!ndev->phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_get_eee(ndev->phydev, edata);
+}
+
+static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ if (!ndev->phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_eee(ndev->phydev, edata);
+}
+
+static int emac_nway_reset(struct net_device *ndev)
+{
+ return phy_ethtool_nway_reset(ndev);
+}
+
+static int emac_get_sset_count(struct net_device *ndev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return ICSSG_NUM_ETHTOOL_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
+ if (!icssg_all_stats[i].standard_stats) {
+ memcpy(p, icssg_all_stats[i].name,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int i;
+
+ emac_update_hardware_stats(emac);
+
+ for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++)
+ if (!icssg_all_stats[i].standard_stats)
+ *(data++) = emac->stats[i];
+}
+
+static int emac_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static int emac_set_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ /* The number of TX queues can only be changed while the
+ * interface is down.
+ */
+ if (netif_running(emac->ndev))
+ return -EBUSY;
+
+ emac->tx_ch_num = ch->tx_count;
+
+ return 0;
+}
+
+static void emac_get_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ ch->max_rx = 1;
+ ch->max_tx = PRUETH_MAX_TX_QUEUES;
+ ch->rx_count = 1;
+ ch->tx_count = emac->tx_ch_num;
+}
+
+static const struct ethtool_rmon_hist_range emac_rmon_ranges[] = {
+ { 0, 64},
+ { 65, 128},
+ { 129, 256},
+ { 257, 512},
+ { 513, PRUETH_MAX_PKT_SIZE},
+ {}
+};
+
+static void emac_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ *ranges = emac_rmon_ranges;
+
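+ /* bucket1 counts frames of up to 64 bytes; subtracting the exact
+ * 64-byte count leaves the undersized frames
+ */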
+ rmon_stats->undersize_pkts = emac_get_stat_by_name(emac, "rx_bucket1_frames") -
+ emac_get_stat_by_name(emac, "rx_64B_frames");
+
+ rmon_stats->hist[0] = emac_get_stat_by_name(emac, "rx_bucket1_frames");
+ rmon_stats->hist[1] = emac_get_stat_by_name(emac, "rx_bucket2_frames");
+ rmon_stats->hist[2] = emac_get_stat_by_name(emac, "rx_bucket3_frames");
+ rmon_stats->hist[3] = emac_get_stat_by_name(emac, "rx_bucket4_frames");
+ rmon_stats->hist[4] = emac_get_stat_by_name(emac, "rx_bucket5_frames");
+
+ rmon_stats->hist_tx[0] = emac_get_stat_by_name(emac, "tx_bucket1_frames");
+ rmon_stats->hist_tx[1] = emac_get_stat_by_name(emac, "tx_bucket2_frames");
+ rmon_stats->hist_tx[2] = emac_get_stat_by_name(emac, "tx_bucket3_frames");
+ rmon_stats->hist_tx[3] = emac_get_stat_by_name(emac, "tx_bucket4_frames");
+ rmon_stats->hist_tx[4] = emac_get_stat_by_name(emac, "tx_bucket5_frames");
+}
+
+const struct ethtool_ops icssg_ethtool_ops = {
+ .get_drvinfo = emac_get_drvinfo,
+ .get_msglevel = emac_get_msglevel,
+ .set_msglevel = emac_set_msglevel,
+ .get_sset_count = emac_get_sset_count,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+ .get_strings = emac_get_strings,
+ .get_ts_info = emac_get_ts_info,
+ .get_channels = emac_get_channels,
+ .set_channels = emac_set_channels,
+ .get_link_ksettings = emac_get_link_ksettings,
+ .set_link_ksettings = emac_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_eee = emac_get_eee,
+ .set_eee = emac_set_eee,
+ .nway_reset = emac_nway_reset,
+ .get_rmon_stats = emac_get_rmon_stats,
+};
diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
new file mode 100644
index 000000000000..92718ae40d7e
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include "icssg_mii_rt.h"
+#include "icssg_prueth.h"
+
+void icssg_mii_update_ipg(struct regmap *mii_rt, int mii, u32 ipg)
+{
+ u32 val;
+
+ if (mii == ICSS_MII0) {
+ regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, ipg);
+ } else {
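+ /* read IPG0, program IPG1, then write IPG0 back with its original
+ * value; IPG1 appears to be latched by h/w only on an IPG0 write
+ */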
+ regmap_read(mii_rt, PRUSS_MII_RT_TX_IPG0, &val);
+ regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG1, ipg);
+ regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, val);
+ }
+}
+
+void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu)
+{
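+ /* add Ethernet header and FCS to the L3 MTU to get the max frame
+ * size; the FRMS field is programmed as (max frame size - 1)
+ */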
+ mtu += (ETH_HLEN + ETH_FCS_LEN);
+ if (mii == ICSS_MII0) {
+ regmap_update_bits(mii_rt,
+ PRUSS_MII_RT_RX_FRMS0,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+ } else {
+ regmap_update_bits(mii_rt,
+ PRUSS_MII_RT_RX_FRMS1,
+ PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
+ }
+}
+
+void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
+{
+ u32 gig_en_mask, gig_val = 0, full_duplex_mask, full_duplex_val = 0;
+ int slice = prueth_emac_slice(emac);
+ u32 inband_en_mask, inband_val = 0;
+
+ gig_en_mask = (slice == ICSS_MII0) ? RGMII_CFG_GIG_EN_MII0 :
+ RGMII_CFG_GIG_EN_MII1;
+ if (emac->speed == SPEED_1000)
+ gig_val = gig_en_mask;
+ regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, gig_en_mask, gig_val);
+
+ inband_en_mask = (slice == ICSS_MII0) ? RGMII_CFG_INBAND_EN_MII0 :
+ RGMII_CFG_INBAND_EN_MII1;
+ if (emac->speed == SPEED_10 && phy_interface_mode_is_rgmii(emac->phy_if))
+ inband_val = inband_en_mask;
+ regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, inband_en_mask, inband_val);
+
+ full_duplex_mask = (slice == ICSS_MII0) ? RGMII_CFG_FULL_DUPLEX_MII0 :
+ RGMII_CFG_FULL_DUPLEX_MII1;
+ if (emac->duplex == DUPLEX_FULL)
+ full_duplex_val = full_duplex_mask;
+ regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask,
+ full_duplex_val);
+}
+
+void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if)
+{
+ u32 val, mask, shift;
+
+ mask = mii == ICSS_MII0 ? ICSSG_CFG_MII0_MODE : ICSSG_CFG_MII1_MODE;
+ shift = mii == ICSS_MII0 ? ICSSG_CFG_MII0_MODE_SHIFT : ICSSG_CFG_MII1_MODE_SHIFT;
+
+ val = MII_MODE_RGMII;
+ if (phy_if == PHY_INTERFACE_MODE_MII)
+ val = MII_MODE_MII;
+
+ val <<= shift;
+ regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, mask, val);
+ regmap_read(miig_rt, ICSSG_CFG_OFFSET, &val);
+}
+
+u32 icssg_rgmii_cfg_get_bitfield(struct regmap *miig_rt, u32 mask, u32 shift)
+{
+ u32 val;
+
+ regmap_read(miig_rt, RGMII_CFG_OFFSET, &val);
+ val &= mask;
+ val >>= shift;
+
+ return val;
+}
+
+u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii)
+{
+ u32 shift = RGMII_CFG_SPEED_MII0_SHIFT, mask = RGMII_CFG_SPEED_MII0;
+
+ if (mii == ICSS_MII1) {
+ shift = RGMII_CFG_SPEED_MII1_SHIFT;
+ mask = RGMII_CFG_SPEED_MII1;
+ }
+
+ return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
+}
+
+u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
+{
+ u32 shift = RGMII_CFG_FULLDUPLEX_MII0_SHIFT;
+ u32 mask = RGMII_CFG_FULLDUPLEX_MII0;
+
+ if (mii == ICSS_MII1) {
+ shift = RGMII_CFG_FULLDUPLEX_MII1_SHIFT;
+ mask = RGMII_CFG_FULLDUPLEX_MII1;
+ }
+
+ return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
+}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_rt.h b/drivers/net/ethernet/ti/icssg/icssg_mii_rt.h
new file mode 100644
index 000000000000..55a59bf5299c
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_mii_rt.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PRU-ICSS MII_RT register definitions
+ *
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __NET_PRUSS_MII_RT_H__
+#define __NET_PRUSS_MII_RT_H__
+
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
+/* PRUSS_MII_RT Registers */
+#define PRUSS_MII_RT_RXCFG0 0x0
+#define PRUSS_MII_RT_RXCFG1 0x4
+#define PRUSS_MII_RT_TXCFG0 0x10
+#define PRUSS_MII_RT_TXCFG1 0x14
+#define PRUSS_MII_RT_TX_CRC0 0x20
+#define PRUSS_MII_RT_TX_CRC1 0x24
+#define PRUSS_MII_RT_TX_IPG0 0x30
+#define PRUSS_MII_RT_TX_IPG1 0x34
+#define PRUSS_MII_RT_PRS0 0x38
+#define PRUSS_MII_RT_PRS1 0x3c
+#define PRUSS_MII_RT_RX_FRMS0 0x40
+#define PRUSS_MII_RT_RX_FRMS1 0x44
+#define PRUSS_MII_RT_RX_PCNT0 0x48
+#define PRUSS_MII_RT_RX_PCNT1 0x4c
+#define PRUSS_MII_RT_RX_ERR0 0x50
+#define PRUSS_MII_RT_RX_ERR1 0x54
+
+/* PRUSS_MII_RT_RXCFG0/1 bits */
+#define PRUSS_MII_RT_RXCFG_RX_ENABLE BIT(0)
+#define PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS BIT(1)
+#define PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE BIT(2)
+#define PRUSS_MII_RT_RXCFG_RX_MUX_SEL BIT(3)
+#define PRUSS_MII_RT_RXCFG_RX_L2_EN BIT(4)
+#define PRUSS_MII_RT_RXCFG_RX_BYTE_SWAP BIT(5)
+#define PRUSS_MII_RT_RXCFG_RX_AUTO_FWD_PRE BIT(6)
+#define PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS BIT(9)
+
+/* PRUSS_MII_RT_TXCFG0/1 bits */
+#define PRUSS_MII_RT_TXCFG_TX_ENABLE BIT(0)
+#define PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE BIT(1)
+#define PRUSS_MII_RT_TXCFG_TX_EN_MODE BIT(2)
+#define PRUSS_MII_RT_TXCFG_TX_BYTE_SWAP BIT(3)
+#define PRUSS_MII_RT_TXCFG_TX_MUX_SEL BIT(8)
+#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_SEQUENCE BIT(9)
+#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_ESC_ERR BIT(10)
+#define PRUSS_MII_RT_TXCFG_TX_32_MODE_EN BIT(11)
+#define PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN BIT(12) /* SR2.0 onwards */
+
+#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT 16
+#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_MASK GENMASK(25, 16)
+
+#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT 28
+#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK GENMASK(30, 28)
+
+/* PRUSS_MII_RT_TX_IPG0/1 bits */
+#define PRUSS_MII_RT_TX_IPG_IPG_SHIFT 0
+#define PRUSS_MII_RT_TX_IPG_IPG_MASK GENMASK(9, 0)
+
+/* PRUSS_MII_RT_PRS0/1 bits */
+#define PRUSS_MII_RT_PRS_COL BIT(0)
+#define PRUSS_MII_RT_PRS_CRS BIT(1)
+
+/* PRUSS_MII_RT_RX_FRMS0/1 bits */
+#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_SHIFT 0
+#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK GENMASK(15, 0)
+
+#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT 16
+#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK GENMASK(31, 16)
+
+/* Min/Max in MII_RT_RX_FRMS */
+/* For EMAC and Switch */
+#define PRUSS_MII_RT_RX_FRMS_MAX (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+#define PRUSS_MII_RT_RX_FRMS_MIN_FRM (64)
+
+/* for HSR and PRP */
+#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_LRE (PRUSS_MII_RT_RX_FRMS_MAX + \
+ ICSS_LRE_TAG_RCT_SIZE)
+/* PRUSS_MII_RT_RX_PCNT0/1 bits */
+#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_SHIFT 0
+#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_MASK GENMASK(3, 0)
+
+#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_SHIFT 4
+#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_MASK GENMASK(7, 4)
+
+/* PRUSS_MII_RT_RX_ERR0/1 bits */
+#define PRUSS_MII_RT_RX_ERR_MIN_PCNT_ERR BIT(0)
+#define PRUSS_MII_RT_RX_ERR_MAX_PCNT_ERR BIT(1)
+#define PRUSS_MII_RT_RX_ERR_MIN_FRM_ERR BIT(2)
+#define PRUSS_MII_RT_RX_ERR_MAX_FRM_ERR BIT(3)
+
+#define ICSSG_CFG_OFFSET 0
+#define RGMII_CFG_OFFSET 4
+
+/* Constant to choose between MII0 and MII1 */
+#define ICSS_MII0 0
+#define ICSS_MII1 1
+
+/* ICSSG_CFG Register bits */
+#define ICSSG_CFG_SGMII_MODE BIT(16)
+#define ICSSG_CFG_TX_PRU_EN BIT(11)
+#define ICSSG_CFG_RX_SFD_TX_SOF_EN BIT(10)
+#define ICSSG_CFG_RTU_PRU_PSI_SHARE_EN BIT(9)
+#define ICSSG_CFG_IEP1_TX_EN BIT(8)
+#define ICSSG_CFG_MII1_MODE GENMASK(6, 5)
+#define ICSSG_CFG_MII1_MODE_SHIFT 5
+#define ICSSG_CFG_MII0_MODE GENMASK(4, 3)
+#define ICSSG_CFG_MII0_MODE_SHIFT 3
+#define ICSSG_CFG_RX_L2_G_EN BIT(2)
+#define ICSSG_CFG_TX_L2_EN BIT(1)
+#define ICSSG_CFG_TX_L1_EN BIT(0)
+
+enum mii_mode {
+ MII_MODE_MII = 0,
+ MII_MODE_RGMII
+};
+
+/* RGMII CFG Register bits */
+#define RGMII_CFG_INBAND_EN_MII0 BIT(16)
+#define RGMII_CFG_GIG_EN_MII0 BIT(17)
+#define RGMII_CFG_INBAND_EN_MII1 BIT(20)
+#define RGMII_CFG_GIG_EN_MII1 BIT(21)
+#define RGMII_CFG_FULL_DUPLEX_MII0 BIT(18)
+#define RGMII_CFG_FULL_DUPLEX_MII1 BIT(22)
+#define RGMII_CFG_SPEED_MII0 GENMASK(2, 1)
+#define RGMII_CFG_SPEED_MII1 GENMASK(6, 5)
+#define RGMII_CFG_SPEED_MII0_SHIFT 1
+#define RGMII_CFG_SPEED_MII1_SHIFT 5
+#define RGMII_CFG_FULLDUPLEX_MII0 BIT(3)
+#define RGMII_CFG_FULLDUPLEX_MII1 BIT(7)
+#define RGMII_CFG_FULLDUPLEX_MII0_SHIFT 3
+#define RGMII_CFG_FULLDUPLEX_MII1_SHIFT 7
+#define RGMII_CFG_SPEED_10M 0
+#define RGMII_CFG_SPEED_100M 1
+#define RGMII_CFG_SPEED_1G 2
+
+struct regmap;
+struct prueth_emac;
+
+void icssg_mii_update_ipg(struct regmap *mii_rt, int mii, u32 ipg);
+void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu);
+void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac);
+u32 icssg_rgmii_cfg_get_bitfield(struct regmap *miig_rt, u32 mask, u32 shift);
+u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii);
+u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii);
+void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if);
+
+#endif /* __NET_PRUSS_MII_RT_H__ */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
new file mode 100644
index 000000000000..410612f43cbd
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -0,0 +1,2336 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+
+#include "icssg_prueth.h"
+#include "icssg_mii_rt.h"
+#include "../k3-cppi-desc-pool.h"
+
+#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
+
+/* Netif debug messages possible */
+#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
+
+/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
+#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
+
+#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+
+static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ int max_rflows)
+{
+ if (rx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (rx_chn->rx_chn)
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+}
+
+static void prueth_cleanup_tx_chns(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (tx_chn->tx_chn)
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ /* Assume prueth_cleanup_tx_chns() is called at the
+ * end after all channel resources are freed
+ */
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->irq)
+ free_irq(tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+}
+
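+/* Unmap the first buffer of a TX descriptor chain, then walk any linked
+ * host buffer descriptors, unmapping and freeing each, before freeing the
+ * first descriptor itself.
+ */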
+static void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
+ int budget)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_tx;
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ unsigned int total_bytes = 0;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+ void **swdata;
+
+ tx_chn = &emac->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ /* teardown completion */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&emac->tdown_cnt))
+ complete(&emac->tdown_complete);
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+
+ skb = *(swdata);
+ prueth_xmit_free(tx_chn, desc_tx);
+
+ ndev = skb->dev;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* If the TX queue was stopped, wake it now
+ * if we have enough room.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+ __netif_tx_unlock(netif_txq);
+ }
+
+ return num_tx;
+}
+
+static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
+ struct prueth_emac *emac = tx_chn->emac;
+ int num_tx_packets;
+
+ num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget);
+
+ if (num_tx_packets >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx_packets))
+ enable_irq(tx_chn->irq);
+
+ return num_tx_packets;
+}
+
+static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
+{
+ struct prueth_tx_chn *tx_chn = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+static int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int i, ret;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
+ ret = request_irq(tx_chn->irq, prueth_tx_irq,
+ IRQF_TRIGGER_HIGH, tx_chn->name,
+ tx_chn);
+ if (ret) {
+ netif_napi_del(&tx_chn->napi_tx);
+ dev_err(prueth->dev, "unable to request TX IRQ %d\n",
+ tx_chn->irq);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ prueth_ndev_del_tx_napi(emac, i);
+ return ret;
+}
+
+static int prueth_init_tx_chns(struct prueth_emac *emac)
+{
+ static const struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ .size = PRUETH_MAX_TX_DESC,
+ };
+ struct k3_udma_glue_tx_channel_cfg tx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ int ret, slice, i;
+ u32 hdesc_size;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ init_completion(&emac->tdown_complete);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&tx_cfg, 0, sizeof(tx_cfg));
+ tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(tx_chn->name, sizeof(tx_chn->name),
+ "tx%d-%d", slice, i);
+
+ tx_chn->emac = emac;
+ tx_chn->id = i;
+ tx_chn->descs_num = PRUETH_MAX_TX_DESC;
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev, tx_chn->name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ tx_chn->tx_chn = NULL;
+ netdev_err(ndev,
+ "Failed to request tx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+ tx_chn->desc_pool =
+ k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ tx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
+ goto fail;
+ }
+
+ tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (tx_chn->irq <= 0) {
+ ret = -EINVAL;
+ netdev_err(ndev, "failed to get tx irq\n");
+ goto fail;
+ }
+
+ snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_tx_chns(emac);
+ return ret;
+}
+
+static int prueth_init_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ char *name, u32 max_rflows,
+ u32 max_desc_num)
+{
+ struct k3_udma_glue_rx_channel_cfg rx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ u32 fdqring_id, hdesc_size;
+ int i, ret = 0, slice;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&rx_cfg, 0, sizeof(rx_cfg));
+ rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = max_rflows;
+ rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
+ &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ rx_chn->rx_chn = NULL;
+ netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size,
+ rx_chn->name);
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ rx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
+ goto fail;
+ }
+
+ emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base);
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ netdev_err(ndev, "Failed to init rx flow%d %d\n",
+ i, ret);
+ goto fail;
+ }
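+ /* reuse flow 0's free-descriptor queue for the remaining flows */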
+ if (!i)
+ fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+ rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (rx_chn->irq[i] <= 0) {
+ ret = rx_chn->irq[i];
+ netdev_err(ndev, "Failed to get rx dma irq");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
+ return ret;
+}
+
+static int prueth_dma_rx_push(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ struct prueth_rx_chn *rx_chn)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ u32 pkt_len = skb_tailroom(skb);
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
+ return -EINVAL;
+ }
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ *swdata = skb;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ desc_rx, desc_dma);
+}
+
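+/* Reconstruct a 64-bit nanosecond timestamp from the split IEP fields:
+ * 'lo'/'hi' carry the count within the current cycle, the cycle count and a
+ * rollover count, while 'hi_sw' is the firmware-maintained upper count read
+ * from shared RAM; ns = cycles * cycle_time_ns + in-cycle count.
+ */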
+static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+ u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+ u64 ns;
+
+ iepcount_lo = lo & GENMASK(19, 0);
+ iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+ hi_rollover_count = hi >> 11;
+
+ ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+ ns = ns * cycle_time_ns + iepcount_lo;
+
+ return ns;
+}
+
+static void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ u32 hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+ IEP_DEFAULT_CYCLE_TIME_NS);
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ void **swdata;
+ u32 *psdata;
+ int ret;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ return ret;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+ return 0;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ skb->dev = ndev;
+ new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with old skb to prevent a stall
+ */
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = skb;
+ } else {
+ /* send the filled skb up the n/w stack */
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+ }
+
+ /* queue another RX DMA */
+ ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ if (WARN_ON(ret < 0)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
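+/* RX channel reset/teardown callback: unmap and free the skb attached to
+ * an outstanding RX descriptor.
+ */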
+static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
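+/* Pop one TX timestamp response from the firmware queue into @rsp and
+ * return the shared memory slot back to the firmware pool.
+ */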
+static int emac_get_tx_ts(struct prueth_emac *emac,
+ struct emac_tx_ts_response *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+	/* return the buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
+
+ return 0;
+}
+
+static void tx_ts_work(struct prueth_emac *emac)
+{
+ struct skb_shared_hwtstamps ssh;
+ struct emac_tx_ts_response tsr;
+ struct sk_buff *skb;
+ int ret = 0;
+ u32 hi_sw;
+ u64 ns;
+
+	/* There may be more than one pending request */
+ while (1) {
+ ret = emac_get_tx_ts(emac, &tsr);
+ if (ret) /* nothing more */
+ break;
+
+ if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
+ !emac->tx_ts_skb[tsr.cookie]) {
+ netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
+ tsr.cookie);
+ break;
+ }
+
+ skb = emac->tx_ts_skb[tsr.cookie];
+ emac->tx_ts_skb[tsr.cookie] = NULL; /* free slot */
+ if (!skb) {
+ netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
+ break;
+ }
+
+ hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
+ IEP_DEFAULT_CYCLE_TIME_NS);
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+
+ if (atomic_dec_and_test(&emac->tx_ts_pending)) /* no more? */
+ break;
+ }
+}
+
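+/* A TX timestamp cookie is the index of a free slot in tx_ts_skb[]; the
+ * slot is reserved with ERR_PTR(-EBUSY) until the real skb is stored.
+ */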
+static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+{
+ int i;
+
+ /* search and get the next free slot */
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (!emac->tx_ts_skb[i]) {
+ emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
+ return i;
+ }
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * emac_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * the EMAC hardware transmit queue. It doesn't wait for completion;
+ * TX completion is handled in emac_tx_complete_packets().
+ *
+ * Return: enum netdev_tx
+ */
+static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ int i, ret = 0, q_idx;
+	bool in_tx_ts = false;
+ int tx_ts_cookie;
+ void **swdata;
+ u32 pkt_len;
+ u32 *epib;
+
+ pkt_len = skb_headlen(skb);
+ q_idx = skb_get_queue_mapping(skb);
+
+ tx_chn = &emac->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: failed to map skb buffer\n");
+ ret = NETDEV_TX_OK;
+ goto drop_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ goto drop_stop_q_busy;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ emac->tx_ts_enabled) {
+ tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
+ if (tx_ts_cookie >= 0) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Request TX timestamp */
+ epib[0] = (u32)tx_ts_cookie;
+ epib[1] = 0x80000000; /* TX TS request */
+ emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
+			in_tx_ts = true;
+ }
+ }
+
+	/* Set the dst tag: bits 8..15 carry the internal queue id used by
+	 * the firmware, bits 0..7 carry the port number for directed
+	 * packets in switch mode operation.
+	 */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *swdata = skb;
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ netdev_err(ndev,
+ "tx: failed to allocate frag. descriptor\n");
+ goto free_desc_stop_q_busy_cleanup_tx_ts;
+ }
+
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: Failed to map skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ret = NETDEV_TX_OK;
+ goto cleanup_tx_ts;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON_ONCE(pkt_len != skb->len);
+
+ /* report bql before sending packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ /* cppi5_desc_dump(first_desc, 64); */
+
+ skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ if (in_tx_ts)
+ atomic_inc(&emac->tx_ts_pending);
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS)
+ netif_tx_wake_queue(netif_txq);
+ }
+
+ return NETDEV_TX_OK;
+
+cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_free_skb:
+ dev_kfree_skb_any(skb);
+
+ /* error */
+ ndev->stats.tx_dropped++;
+ netdev_err(ndev, "tx: error: %d\n", ret);
+
+ return ret;
+
+free_desc_stop_q_busy_cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_stop_q_busy:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+
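+/* TX channel reset/teardown callback: free the descriptor chain and the
+ * skb of an outstanding TX packet.
+ */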
+static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ prueth_xmit_free(tx_chn, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ /* currently only TX timestamp is being returned */
+ tx_ts_work(emac);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&emac->napi_rx);
+
+ return IRQ_HANDLED;
+}
+
+struct icssg_firmwares {
+ char *pru;
+ char *rtu;
+ char *txpru;
+};
+
+static struct icssg_firmwares icssg_emac_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
+ }
+};
+
+static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+{
+ struct icssg_firmwares *firmwares;
+ struct device *dev = prueth->dev;
+ int slice, ret;
+
+ firmwares = icssg_emac_firmwares;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0) {
+ netdev_err(emac->ndev, "invalid port\n");
+ return -EINVAL;
+ }
+
+ ret = icssg_config(prueth, emac, slice);
+ if (ret)
+ return ret;
+
+ ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
+ ret = rproc_boot(prueth->pru[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ return -EINVAL;
+ }
+
+ ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
+ ret = rproc_boot(prueth->rtu[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ goto halt_pru;
+ }
+
+ ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
+ ret = rproc_boot(prueth->txpru[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+ goto halt_rtu;
+ }
+
+ emac->fw_running = 1;
+ return 0;
+
+halt_rtu:
+ rproc_shutdown(prueth->rtu[slice]);
+
+halt_pru:
+ rproc_shutdown(prueth->pru[slice]);
+
+ return ret;
+}
+
+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ emac->fw_running = 0;
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
+static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (emac->tx_ts_skb[i]) {
+ dev_kfree_skb_any(emac->tx_ts_skb[i]);
+ emac->tx_ts_skb[i] = NULL;
+ }
+ }
+}
+
+/* Called back by the PHY layer if there is a change in the link state of the HW port */
+static void emac_adjust_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ struct prueth *prueth = emac->prueth;
+ bool new_state = false;
+ unsigned long flags;
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+
+ /* f/w should support 100 & 1000 */
+ emac->speed = SPEED_1000;
+
+ /* half duplex may not be supported by f/w */
+ emac->duplex = DUPLEX_FULL;
+ }
+
+ if (new_state) {
+ phy_print_status(phydev);
+
+ /* update RGMII and MII configuration based on PHY negotiated
+ * values
+ */
+ if (emac->link) {
+ /* Set the RGMII cfg for gig en and full duplex */
+ icssg_update_rgmii_cfg(prueth->miig_rt, emac);
+
+ /* update the Tx IPG based on 100M/1G speed */
+ spin_lock_irqsave(&emac->lock, flags);
+ icssg_config_ipg(emac);
+ spin_unlock_irqrestore(&emac->lock, flags);
+ icssg_config_set_speed(emac);
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+
+ } else {
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ }
+ }
+
+ if (emac->link) {
+ /* reactivate the transmit queue */
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ netif_tx_stop_all_queues(ndev);
+ prueth_cleanup_tx_ts(emac);
+ }
+}
+
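+/* NAPI RX poll: service flows from the highest (PRUETH_MAX_RX_FLOWS - 1)
+ * down to 0, up to @budget packets in total, and re-enable the RX IRQ
+ * once the budget is not exhausted and NAPI completes.
+ */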
+static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ int rx_flow = PRUETH_RX_FLOW_DATA;
+ int flow = PRUETH_MAX_RX_FLOWS;
+ int num_rx = 0;
+ int cur_budget;
+ int ret;
+
+ while (flow--) {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow);
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+
+ return num_rx;
+}
+
+static int prueth_prepare_rx_chan(struct prueth_emac *emac,
+ struct prueth_rx_chn *chn,
+ int buf_size)
+{
+ struct sk_buff *skb;
+ int i, ret;
+
+ for (i = 0; i < chn->descs_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = prueth_dma_rx_push(emac, skb, chn);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit skb for rx chan %s ret %d\n",
+ chn->name, ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
+ bool free_skb)
+{
+ int i;
+
+ for (i = 0; i < ch_num; i++) {
+ if (free_skb)
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+
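+/* Reset all RX flows; only flow 0 also cleans the shared free descriptor
+ * queue (skip_fdq is set for the remaining flows).
+ */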
+static void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
+ int num_flows, bool disable)
+{
+ int i;
+
+ for (i = 0; i < num_flows; i++)
+ k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
+ prueth_rx_cleanup, !!i);
+ if (disable)
+ k3_udma_glue_disable_rx_chn(chn->rx_chn);
+}
+
+static int emac_phy_connect(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ struct net_device *ndev = emac->ndev;
+ /* connect PHY */
+ ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
+ &emac_adjust_link, 0,
+ emac->phy_if);
+ if (!ndev->phydev) {
+ dev_err(prueth->dev, "couldn't connect to phy %s\n",
+ emac->phy_node->full_name);
+ return -ENODEV;
+ }
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII)
+ phy_set_max_speed(ndev->phydev, SPEED_100);
+
+ return 0;
+}
+
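+/* Read the 64-bit IEP time. The hi count and rollover words are re-read
+ * until two consecutive reads match, so a rollover between reads cannot
+ * produce a torn timestamp.
+ */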
+static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
+{
+ u32 hi_rollover_count, hi_rollover_count_r;
+ struct prueth_emac *emac = clockops_data;
+ struct prueth *prueth = emac->prueth;
+ void __iomem *fw_hi_r_count_addr;
+ void __iomem *fw_count_hi_addr;
+ u32 iepcount_hi, iepcount_hi_r;
+ unsigned long flags;
+ u32 iepcount_lo;
+ u64 ts = 0;
+
+ fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
+ fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
+
+ local_irq_save(flags);
+ do {
+ iepcount_hi = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi += readl(fw_count_hi_addr);
+ hi_rollover_count = readl(fw_hi_r_count_addr);
+ ptp_read_system_prets(sts);
+ iepcount_lo = icss_iep_get_count_low(emac->iep);
+ ptp_read_system_postts(sts);
+
+ iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi_r += readl(fw_count_hi_addr);
+ hi_rollover_count_r = readl(fw_hi_r_count_addr);
+ } while ((iepcount_hi_r != iepcount_hi) ||
+ (hi_rollover_count != hi_rollover_count_r));
+ local_irq_restore(flags);
+
+ ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
+ ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
+
+ return ts;
+}
+
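+/* Set the IEP time: build a setclock descriptor in shared memory, raise
+ * the request flag and poll for the firmware acknowledgment.
+ */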
+static void prueth_iep_settime(void *clockops_data, u64 ns)
+{
+ struct icssg_setclock_desc __iomem *sc_descp;
+ struct prueth_emac *emac = clockops_data;
+ struct icssg_setclock_desc sc_desc;
+ u64 cyclecount;
+ u32 cycletime;
+ int timeout;
+
+ if (!emac->fw_running)
+ return;
+
+ sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
+
+ cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
+ cyclecount = ns / cycletime;
+
+ memset(&sc_desc, 0, sizeof(sc_desc));
+ sc_desc.margin = cycletime - 1000;
+ sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
+ sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
+ sc_desc.iepcount_set = ns % cycletime;
+	sc_desc.CMP0_current = cycletime - 4;	/* Count from 0 to (cycle time) - 4 */
+
+ memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
+
+ writeb(1, &sc_descp->request);
+
+ timeout = 5; /* fw should take 2-3 ms */
+ while (timeout--) {
+ if (readb(&sc_descp->acknowledgment))
+ return;
+
+ usleep_range(500, 1000);
+ }
+
+ dev_err(emac->prueth->dev, "settime timeout\n");
+}
+
+static int prueth_perout_enable(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp)
+{
+ struct prueth_emac *emac = clockops_data;
+ u32 reduction_factor = 0, offset = 0;
+ struct timespec64 ts;
+ u64 ns_period;
+
+ if (!on)
+ return 0;
+
+	/* Convert the requested period into the firmware's reduction factor
+	 * and CMP offset for PPS/PEROUT handling
+	 */
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ /* f/w doesn't support period less than cycle time */
+ if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
+ return -ENXIO;
+
+ reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
+ offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
+
+	/* f/w requires a minimum offset within a cycle (5 us is used here)
+	 * so CMP can trigger after SYNC is enabled
+	 */
+ if (offset < 5 * NSEC_PER_USEC)
+ offset = 5 * NSEC_PER_USEC;
+
+ /* if offset is close to cycle time then we will miss
+ * the CMP event for last tick when IEP rolls over.
+ * In normal mode, IEP tick is 4ns.
+ * In slow compensation it could be 0ns or 8ns at
+ * every slow compensation cycle.
+ */
+ if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
+ offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
+
+ /* we're in shadow mode so need to set upper 32-bits */
+ *cmp = (u64)offset << 32;
+
+ writel(reduction_factor, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
+
+ writel(0, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
+
+ return 0;
+}
+
+const struct icss_iep_clockops prueth_iep_clockops = {
+ .settime = prueth_iep_settime,
+ .gettime = prueth_iep_gettime,
+ .perout_enable = prueth_perout_enable,
+};
+
+/**
+ * emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when the system wants to start the interface.
+ *
+ * Return: 0 for a successful open, or appropriate error code
+ */
+static int emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret, i, num_data_chn = emac->tx_ch_num;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct device *dev = prueth->dev;
+ int max_rx_flows;
+ int rx_flow;
+
+ /* clear SMEM and MSMC settings for all slices */
+ if (!prueth->emacs_initialized) {
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+ }
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+ icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+
+ icssg_class_default(prueth->miig_rt, slice, 0);
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
+ if (ret) {
+ dev_err(dev, "cannot set real number of tx queues\n");
+ return ret;
+ }
+
+ init_completion(&emac->cmd_complete);
+ ret = prueth_init_tx_chns(emac);
+ if (ret) {
+ dev_err(dev, "failed to init tx channel: %d\n", ret);
+ return ret;
+ }
+
+ max_rx_flows = PRUETH_MAX_RX_FLOWS;
+ ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
+ max_rx_flows, PRUETH_MAX_RX_DESC);
+ if (ret) {
+ dev_err(dev, "failed to init rx channel: %d\n", ret);
+ goto cleanup_tx;
+ }
+
+ ret = prueth_ndev_add_tx_napi(emac);
+ if (ret)
+ goto cleanup_rx;
+
+ /* we use only the highest priority flow for now i.e. @irq[3] */
+ rx_flow = PRUETH_RX_FLOW_DATA;
+ ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX IRQ\n");
+ goto cleanup_napi;
+ }
+
+ /* reset and start PRU firmware */
+ ret = prueth_emac_start(prueth, emac);
+ if (ret)
+ goto free_rx_irq;
+
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
+ if (!prueth->emacs_initialized) {
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ }
+
+ ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
+ IRQF_ONESHOT, dev_name(dev), emac);
+ if (ret)
+ goto stop;
+
+ /* Prepare RX */
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ goto free_tx_ts_irq;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ }
+
+ /* Enable NAPI in Tx and Rx direction */
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ napi_enable(&emac->napi_rx);
+
+ /* start PHY */
+ phy_start(ndev->phydev);
+
+ prueth->emacs_initialized++;
+
+ queue_work(system_long_wq, &emac->stats_work.work);
+
+ return 0;
+
+reset_tx_chan:
+	/* Since the interface is not yet up, there won't be
+	 * any SKB to complete. So pass false for free_skb.
+	 */
+ prueth_reset_tx_chan(emac, i, false);
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+free_tx_ts_irq:
+ free_irq(emac->tx_ts_irq, emac);
+stop:
+ prueth_emac_stop(emac);
+free_rx_irq:
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+cleanup_napi:
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+cleanup_rx:
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+cleanup_tx:
+ prueth_cleanup_tx_chns(emac);
+
+ return ret;
+}
+
+/**
+ * emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when the system wants to stop or bring down the interface.
+ *
+ * Return: Always 0 (Success)
+ */
+static int emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int rx_flow = PRUETH_RX_FLOW_DATA;
+ int max_rx_flows;
+ int ret, i;
+
+ /* inform the upper layers. */
+ netif_tx_stop_all_queues(ndev);
+
+ /* block packets from wire */
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
+
+ icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(ndev, "tx teardown timeout\n");
+
+ prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_disable(&emac->tx_chns[i].napi_tx);
+
+ max_rx_flows = PRUETH_MAX_RX_FLOWS;
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
+
+ napi_disable(&emac->napi_rx);
+
+ cancel_work_sync(&emac->rx_mode_work);
+
+ /* Destroying the queued work in ndo_stop() */
+ cancel_delayed_work_sync(&emac->stats_work);
+
+ /* stop PRUs */
+ prueth_emac_stop(emac);
+
+ if (prueth->emacs_initialized == 1)
+ icss_iep_exit(emac->iep);
+
+ free_irq(emac->tx_ts_irq, emac);
+
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+ prueth_cleanup_tx_chns(emac);
+
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+ prueth_cleanup_tx_chns(emac);
+
+ prueth->emacs_initialized--;
+
+ return 0;
+}
+
+static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ ndev->stats.tx_errors++;
+}
+
+static void emac_ndo_set_rx_mode_work(struct work_struct *work)
+{
+ struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
+ struct net_device *ndev = emac->ndev;
+ bool promisc, allmulti;
+
+ if (!netif_running(ndev))
+ return;
+
+ promisc = ndev->flags & IFF_PROMISC;
+ allmulti = ndev->flags & IFF_ALLMULTI;
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
+
+ if (promisc) {
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ return;
+ }
+
+ if (allmulti) {
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ return;
+ }
+
+ if (!netdev_mc_empty(ndev)) {
+ emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ return;
+ }
+}
+
+/**
+ * emac_ndo_set_rx_mode - EMAC set receive mode function
+ * @ndev: The EMAC network adapter
+ *
+ * Called when the system wants to set the receive mode of the device.
+ *
+ */
+static void emac_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ queue_work(emac->cmd_wq, &emac->rx_mode_work);
+}
+
+static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ emac->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ emac->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ emac->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ emac->rx_ts_enabled = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return emac_get_ts_config(ndev, ifr);
+ case SIOCSHWTSTAMP:
+ return emac_set_ts_config(ndev, ifr);
+ default:
+ break;
+ }
+
+ return phy_do_ioctl(ndev, ifr, cmd);
+}
+
+static void emac_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ emac_update_hardware_stats(emac);
+
+ stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
+ stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
+ stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
+ stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
+ stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
+ stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
+ stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
+
+ stats->rx_errors = ndev->stats.rx_errors;
+ stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->tx_dropped = ndev->stats.tx_dropped;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_ndo_open,
+ .ndo_stop = emac_ndo_stop,
+ .ndo_start_xmit = emac_ndo_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = emac_ndo_tx_timeout,
+ .ndo_set_rx_mode = emac_ndo_set_rx_mode,
+ .ndo_eth_ioctl = emac_ndo_ioctl,
+ .ndo_get_stats64 = emac_ndo_get_stats64,
+};
+
+/* get emac_port corresponding to eth_node name */
+static int prueth_node_port(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_PORT_MII0;
+ else if (port_id == 1)
+ return PRUETH_PORT_MII1;
+ else
+ return PRUETH_PORT_INVALID;
+}
+
+/* get MAC instance corresponding to eth_node name */
+static int prueth_node_mac(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_MAC0;
+ else if (port_id == 1)
+ return PRUETH_MAC1;
+ else
+ return PRUETH_MAC_INVALID;
+}
+
+static int prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ int ret, num_tx_chn = PRUETH_MAX_TX_QUEUES;
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ enum prueth_port port;
+ const char *irq_name;
+ enum prueth_mac mac;
+
+ port = prueth_node_port(eth_node);
+ if (port == PRUETH_PORT_INVALID)
+ return -EINVAL;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return -EINVAL;
+
+ ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
+ if (!ndev)
+ return -ENOMEM;
+
+ emac = netdev_priv(ndev);
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+ emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
+ if (!emac->cmd_wq) {
+ ret = -ENOMEM;
+ goto free_ndev;
+ }
+ INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
+
+ INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);
+
+ ret = pruss_request_mem_region(prueth->pruss,
+ port == PRUETH_PORT_MII0 ?
+ PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
+ &emac->dram);
+ if (ret) {
+ dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
+ ret = -ENOMEM;
+ goto free_wq;
+ }
+
+ emac->tx_ch_num = 1;
+
+ irq_name = "tx_ts0";
+ if (emac->port_id == PRUETH_PORT_MII1)
+ irq_name = "tx_ts1";
+ emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
+ if (emac->tx_ts_irq < 0) {
+ ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
+ goto free;
+ }
+
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ spin_lock_init(&emac->lock);
+ mutex_init(&emac->cmd_lock);
+
+ emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
+ if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
+ dev_err(prueth->dev, "couldn't find phy-handle\n");
+ ret = -ENODEV;
+ goto free;
+ } else if (of_phy_is_fixed_link(eth_node)) {
+ ret = of_phy_register_fixed_link(eth_node);
+ if (ret) {
+ ret = dev_err_probe(prueth->dev, ret,
+ "failed to register fixed-link phy\n");
+ goto free;
+ }
+
+ emac->phy_node = eth_node;
+ }
+
+ ret = of_get_phy_mode(eth_node, &emac->phy_if);
+ if (ret) {
+ dev_err(prueth->dev, "could not get phy-mode property\n");
+ goto free;
+ }
+
+ if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
+ !phy_interface_mode_is_rgmii(emac->phy_if)) {
+ dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
+ ret = -EINVAL;
+ goto free;
+ }
+
+ /* AM65 SR2.0 has TX Internal delay always enabled by hardware
+ * and it is not possible to disable TX Internal delay. The below
+ * switch case block describes how we handle different phy modes
+ * based on hardware restriction.
+ */
+ switch (emac->phy_if) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ emac->phy_if = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
+ ret = -EINVAL;
+ goto free;
+ default:
+ break;
+ }
+
+ /* get mac address from DT and set private and netdev addr */
+ ret = of_get_ethdev_address(eth_node, ndev);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
+ ndev->max_mtu = PRUETH_MAX_MTU;
+ ndev->netdev_ops = &emac_netdev_ops;
+ ndev->ethtool_ops = &icssg_ethtool_ops;
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features = ndev->hw_features;
+
+ netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
+ prueth->emac[mac] = emac;
+
+ return 0;
+
+free:
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+free_wq:
+ destroy_workqueue(emac->cmd_wq);
+free_ndev:
+ emac->ndev = NULL;
+ prueth->emac[mac] = NULL;
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ if (of_phy_is_fixed_link(emac->phy_node))
+ of_phy_deregister_fixed_link(emac->phy_node);
+
+ netif_napi_del(&emac->napi_rx);
+
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+ destroy_workqueue(emac->cmd_wq);
+ free_netdev(emac->ndev);
+ prueth->emac[mac] = NULL;
+}
+
+static int prueth_get_cores(struct prueth *prueth, int slice)
+{
+ struct device *dev = prueth->dev;
+ enum pruss_pru_id pruss_id;
+ struct device_node *np;
+ int idx = -1, ret;
+
+ np = dev->of_node;
+
+ switch (slice) {
+ case ICSS_SLICE0:
+ idx = 0;
+ break;
+ case ICSS_SLICE1:
+ idx = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
+ if (IS_ERR(prueth->pru[slice])) {
+ ret = PTR_ERR(prueth->pru[slice]);
+ prueth->pru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
+ }
+ prueth->pru_id[slice] = pruss_id;
+
+ idx++;
+ prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->rtu[slice])) {
+ ret = PTR_ERR(prueth->rtu[slice]);
+ prueth->rtu[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
+ }
+
+ idx++;
+ prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->txpru[slice])) {
+ ret = PTR_ERR(prueth->txpru[slice]);
+ prueth->txpru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
+ }
+
+ return 0;
+}
+
+static void prueth_put_cores(struct prueth *prueth, int slice)
+{
+ if (prueth->txpru[slice])
+ pru_rproc_put(prueth->txpru[slice]);
+
+ if (prueth->rtu[slice])
+ pru_rproc_put(prueth->rtu[slice]);
+
+ if (prueth->pru[slice])
+ pru_rproc_put(prueth->pru[slice]);
+}
+
+static const struct of_device_id prueth_dt_match[];
+
+static int prueth_probe(struct platform_device *pdev)
+{
+ struct device_node *eth_node, *eth_ports_node;
+ struct device_node *eth0_node = NULL;
+ struct device_node *eth1_node = NULL;
+ struct genpool_data_align gp_data = {
+ .align = SZ_64K,
+ };
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct prueth *prueth;
+ struct pruss *pruss;
+ u32 msmc_ram_size;
+ int i, ret;
+
+ np = dev->of_node;
+
+ match = of_match_device(prueth_dt_match, dev);
+ if (!match)
+ return -ENODEV;
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, prueth);
+ prueth->pdev = pdev;
+ prueth->pdata = *(const struct prueth_pdata *)match->data;
+
+ prueth->dev = dev;
+ eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
+ if (!eth_ports_node)
+ return -ENOENT;
+
+ for_each_child_of_node(eth_ports_node, eth_node) {
+ u32 reg;
+
+ if (strcmp(eth_node->name, "port"))
+ continue;
+ ret = of_property_read_u32(eth_node, "reg", &reg);
+		if (ret < 0) {
+			dev_err(dev, "%pOF error reading port_id %d\n",
+				eth_node, ret);
+			continue;
+		}
+
+ of_node_get(eth_node);
+
+ if (reg == 0) {
+ eth0_node = eth_node;
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+ } else if (reg == 1) {
+ eth1_node = eth_node;
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+ } else {
+ dev_err(dev, "port reg should be 0 or 1\n");
+ }
+ }
+
+ of_node_put(eth_ports_node);
+
+ /* At least one node must be present and available else we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither port0 nor port1 node available\n");
+ return -ENODEV;
+ }
+
+ if (eth0_node == eth1_node) {
+ dev_err(dev, "port0 and port1 can't have same reg\n");
+ of_node_put(eth0_node);
+ return -ENODEV;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
+ if (IS_ERR(prueth->miig_rt)) {
+ dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ if (eth0_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE0);
+ if (ret)
+ goto put_cores;
+ }
+
+ if (eth1_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE1);
+ if (ret)
+ goto put_cores;
+ }
+
+ pruss = pruss_get(eth0_node ?
+ prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_cores;
+ }
+
+ prueth->pruss = pruss;
+
+ ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
+ &prueth->shram);
+	if (ret) {
+		dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
+		pruss_put(prueth->pruss);
+		goto put_cores;
+	}
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+
+ goto put_mem;
+ }
+
+ msmc_ram_size = MSMC_RAM_SIZE;
+
+	/* NOTE: F/W bug requires the buffer base to be 64KB aligned */
+ prueth->msmcram.va =
+ (void __iomem *)gen_pool_alloc_algo(prueth->sram_pool,
+ msmc_ram_size,
+ gen_pool_first_fit_align,
+ &gp_data);
+
+ if (!prueth->msmcram.va) {
+ ret = -ENOMEM;
+ dev_err(dev, "unable to allocate MSMC resource\n");
+ goto put_mem;
+ }
+ prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va);
+ prueth->msmcram.size = msmc_ram_size;
+ memset_io(prueth->msmcram.va, 0, msmc_ram_size);
+ dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
+ prueth->msmcram.va, prueth->msmcram.size);
+
+ prueth->iep0 = icss_iep_get_idx(np, 0);
+ if (IS_ERR(prueth->iep0)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
+ prueth->iep0 = NULL;
+ goto free_pool;
+ }
+
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
+ icss_iep_put(prueth->iep0);
+ prueth->iep0 = NULL;
+ prueth->iep1 = NULL;
+ goto free_pool;
+ }
+
+ if (prueth->pdata.quirk_10m_link_issue) {
+ /* Enable IEP1 for FW in 64bit mode as W/A for 10M FD link detect issue under TX
+ * traffic.
+ */
+ icss_iep_init_fw(prueth->iep1);
+ }
+
+ /* setup netdev interfaces */
+ if (eth0_node) {
+ ret = prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ dev_err_probe(dev, ret, "netdev init %s failed\n",
+ eth0_node->name);
+ goto exit_iep;
+ }
+ prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
+ }
+
+ if (eth1_node) {
+ ret = prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ dev_err_probe(dev, ret, "netdev init %s failed\n",
+ eth1_node->name);
+ goto netdev_exit;
+ }
+
+ prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
+ }
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0");
+ goto netdev_exit;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
+
+ emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1");
+ goto netdev_unregister;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
+ emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
+ }
+
+ dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ if (eth1_node)
+ of_node_put(eth1_node);
+ if (eth0_node)
+ of_node_put(eth0_node);
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ if (prueth->emac[i]->ndev->phydev) {
+ phy_disconnect(prueth->emac[i]->ndev->phydev);
+ prueth->emac[i]->ndev->phydev = NULL;
+ }
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+exit_iep:
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va, msmc_ram_size);
+
+put_mem:
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+ pruss_put(prueth->pruss);
+
+put_cores:
+ if (eth1_node) {
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
+ }
+
+ if (eth0_node) {
+ prueth_put_cores(prueth, ICSS_SLICE0);
+ of_node_put(eth0_node);
+ }
+
+ return ret;
+}
+
+static void prueth_remove(struct platform_device *pdev)
+{
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ struct device_node *eth_node;
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ phy_stop(prueth->emac[i]->ndev->phydev);
+ phy_disconnect(prueth->emac[i]->ndev->phydev);
+ prueth->emac[i]->ndev->phydev = NULL;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
+
+ icss_iep_put(prueth->iep1);
+ icss_iep_put(prueth->iep0);
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va,
+ MSMC_RAM_SIZE);
+
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+
+ pruss_put(prueth->pruss);
+
+ if (prueth->eth_node[PRUETH_MAC1])
+ prueth_put_cores(prueth, ICSS_SLICE1);
+
+ if (prueth->eth_node[PRUETH_MAC0])
+ prueth_put_cores(prueth, ICSS_SLICE0);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = emac_ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = emac_ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
+};
+
+static const struct prueth_pdata am654_icssg_pdata = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .quirk_10m_link_issue = 1,
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = prueth_probe,
+ .remove_new = prueth_remove,
+ .driver = {
+ .name = "icssg-prueth",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
new file mode 100644
index 000000000000..3fe80a8758d3
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSSG_PRUETH_H
+#define __NET_TI_ICSSG_PRUETH_H
+
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/pruss_driver.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/remoteproc.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include <net/devlink.h>
+
+#include "icssg_config.h"
+#include "icss_iep.h"
+#include "icssg_switch_map.h"
+
+#define PRUETH_MAX_MTU (2000 - ETH_HLEN - ETH_FCS_LEN)
+#define PRUETH_MIN_PKT_SIZE (VLAN_ETH_ZLEN)
+#define PRUETH_MAX_PKT_SIZE (PRUETH_MAX_MTU + ETH_HLEN + ETH_FCS_LEN)
+
+#define ICSS_SLICE0 0
+#define ICSS_SLICE1 1
+
+#define ICSS_FW_PRU 0
+#define ICSS_FW_RTU 1
+
+#define ICSSG_MAX_RFLOWS 8 /* per slice */
+
+/* Number of ICSSG related stats */
+#define ICSSG_NUM_STATS 60
+#define ICSSG_NUM_STANDARD_STATS 31
+#define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS)
+
+/* Firmware status codes */
+#define ICSS_HS_FW_READY 0x55555555
+#define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */
+
+/* Firmware command codes */
+#define ICSS_HS_CMD_BUSY 0x40000000
+#define ICSS_HS_CMD_DONE 0x80000000
+#define ICSS_HS_CMD_CANCEL 0x10000000
+
+/* Firmware commands */
+#define ICSS_CMD_SPAD 0x20
+#define ICSS_CMD_RXTX 0x10
+#define ICSS_CMD_ADD_FDB 0x1
+#define ICSS_CMD_DEL_FDB 0x2
+#define ICSS_CMD_SET_RUN 0x4
+#define ICSS_CMD_GET_FDB_SLOT 0x5
+#define ICSS_CMD_ENABLE_VLAN 0x5
+#define ICSS_CMD_DISABLE_VLAN 0x6
+#define ICSS_CMD_ADD_FILTER 0x7
+#define ICSS_CMD_ADD_MAC 0x8
+
+/* In switch mode there are 3 real ports, i.e. 3 MAC addresses.
+ * However, Linux sees only the host side port. The other 2 ports
+ * are the switch ports.
+ * In EMAC mode there are 2 real ports, i.e. 2 MAC addresses.
+ * Linux sees both ports.
+ */
+enum prueth_port {
+ PRUETH_PORT_HOST = 0, /* host side port */
+ PRUETH_PORT_MII0, /* physical port RG/SG MII 0 */
+ PRUETH_PORT_MII1, /* physical port RG/SG MII 1 */
+ PRUETH_PORT_INVALID, /* Invalid prueth port */
+};
+
+enum prueth_mac {
+ PRUETH_MAC0 = 0,
+ PRUETH_MAC1,
+ PRUETH_NUM_MACS,
+ PRUETH_MAC_INVALID,
+};
+
+struct prueth_tx_chn {
+ struct device *dma_dev;
+ struct napi_struct napi_tx;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_tx_channel *tx_chn;
+ struct prueth_emac *emac;
+ u32 id;
+ u32 descs_num;
+ unsigned int irq;
+ char name[32];
+};
+
+struct prueth_rx_chn {
+ struct device *dev;
+ struct device *dma_dev;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_rx_channel *rx_chn;
+ u32 descs_num;
+ unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
+ char name[32];
+};
+
+/* There are 4 TX DMA channels; CH3 (thread 3) has the highest priority
+ * and the lower three are lower-priority channels (threads).
+ */
+#define PRUETH_MAX_TX_QUEUES 4
+
+#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+
+/* data for each emac port */
+struct prueth_emac {
+ bool fw_running;
+ struct prueth *prueth;
+ struct net_device *ndev;
+ u8 mac_addr[6];
+ struct napi_struct napi_rx;
+ u32 msg_enable;
+
+ int link;
+ int speed;
+ int duplex;
+
+ const char *phy_id;
+ struct device_node *phy_node;
+ phy_interface_t phy_if;
+ enum prueth_port port_id;
+ struct icss_iep *iep;
+ unsigned int rx_ts_enabled : 1;
+ unsigned int tx_ts_enabled : 1;
+
+ /* DMA related */
+ struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES];
+ struct completion tdown_complete;
+ atomic_t tdown_cnt;
+ struct prueth_rx_chn rx_chns;
+ int rx_flow_id_base;
+ int tx_ch_num;
+
+ spinlock_t lock; /* serialize access */
+
+ /* TX HW Timestamping */
+ /* TX TS cookie will be index to the tx_ts_skb array */
+ struct sk_buff *tx_ts_skb[PRUETH_MAX_TX_TS_REQUESTS];
+ atomic_t tx_ts_pending;
+ int tx_ts_irq;
+
+ u8 cmd_seq;
+ /* shutdown related */
+ u32 cmd_data[4];
+ struct completion cmd_complete;
+ /* Mutex to serialize access to firmware command interface */
+ struct mutex cmd_lock;
+ struct work_struct rx_mode_work;
+ struct workqueue_struct *cmd_wq;
+
+ struct pruss_mem_region dram;
+
+ struct delayed_work stats_work;
+ u64 stats[ICSSG_NUM_STATS];
+};
+
+/**
+ * struct prueth_pdata - PRUeth platform data
+ * @fdqring_mode: Free desc queue mode
+ * @quirk_10m_link_issue: 10M link detect errata
+ */
+struct prueth_pdata {
+ enum k3_ring_mode fdqring_mode;
+ u32 quirk_10m_link_issue:1;
+};
+
+/**
+ * struct prueth - PRUeth structure
+ * @dev: device
+ * @pruss: pruss handle
+ * @pru: rproc instances of PRUs
+ * @rtu: rproc instances of RTUs
+ * @txpru: rproc instances of TX_PRUs
+ * @shram: PRUSS shared RAM region
+ * @sram_pool: MSMC RAM pool for buffers
+ * @msmcram: MSMC RAM region
+ * @eth_node: DT node for the port
+ * @emac: private EMAC data structure
+ * @registered_netdevs: list of registered netdevs
+ * @miig_rt: regmap to mii_g_rt block
+ * @mii_rt: regmap to mii_rt block
+ * @pru_id: ID for each of the PRUs
+ * @pdev: pointer to ICSSG platform device
+ * @pdata: pointer to platform data for ICSSG driver
+ * @icssg_hwcmdseq: sequence counter for HWQ messages
+ * @emacs_initialized: num of EMACs/ext ports that are up/running
+ * @iep0: pointer to IEP0 device
+ * @iep1: pointer to IEP1 device
+ */
+struct prueth {
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *pru[PRUSS_NUM_PRUS];
+ struct rproc *rtu[PRUSS_NUM_PRUS];
+ struct rproc *txpru[PRUSS_NUM_PRUS];
+ struct pruss_mem_region shram;
+ struct gen_pool *sram_pool;
+ struct pruss_mem_region msmcram;
+
+ struct device_node *eth_node[PRUETH_NUM_MACS];
+ struct prueth_emac *emac[PRUETH_NUM_MACS];
+ struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+ struct regmap *miig_rt;
+ struct regmap *mii_rt;
+
+ enum pruss_pru_id pru_id[PRUSS_NUM_PRUS];
+ struct platform_device *pdev;
+ struct prueth_pdata pdata;
+ u8 icssg_hwcmdseq;
+ int emacs_initialized;
+ struct icss_iep *iep0;
+ struct icss_iep *iep1;
+};
+
+struct emac_tx_ts_response {
+ u32 reserved[2];
+ u32 cookie;
+ u32 lo_ts;
+ u32 hi_ts;
+};
+
+/* get PRUSS SLICE number from prueth_emac */
+static inline int prueth_emac_slice(struct prueth_emac *emac)
+{
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ return ICSS_SLICE0;
+ case PRUETH_PORT_MII1:
+ return ICSS_SLICE1;
+ default:
+ return -EINVAL;
+ }
+}
+
+extern const struct ethtool_ops icssg_ethtool_ops;
+
+/* Classifier helpers */
+void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac);
+void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac);
+void icssg_class_disable(struct regmap *miig_rt, int slice);
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti);
+void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr);
+
+/* config helpers */
+void icssg_config_ipg(struct prueth_emac *emac);
+int icssg_config(struct prueth *prueth, struct prueth_emac *emac,
+ int slice);
+int emac_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd state);
+void icssg_config_set_speed(struct prueth_emac *emac);
+
+/* Buffer queue helpers */
+int icssg_queue_pop(struct prueth *prueth, u8 queue);
+void icssg_queue_push(struct prueth *prueth, int queue, u16 addr);
+u32 icssg_queue_level(struct prueth *prueth, int queue);
+
+#define prueth_napi_to_tx_chn(pnapi) \
+ container_of(pnapi, struct prueth_tx_chn, napi_tx)
+
+void emac_stats_work_handler(struct work_struct *work);
+void emac_update_hardware_stats(struct prueth_emac *emac);
+int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name);
+#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_queues.c b/drivers/net/ethernet/ti/icssg/icssg_queues.c
new file mode 100644
index 000000000000..3c34f61ad40b
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_queues.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ICSSG Buffer queue helpers
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/regmap.h>
+#include "icssg_prueth.h"
+
+#define ICSSG_QUEUES_MAX 64
+#define ICSSG_QUEUE_OFFSET 0xd00
+#define ICSSG_QUEUE_PEEK_OFFSET 0xe00
+#define ICSSG_QUEUE_CNT_OFFSET 0xe40
+#define ICSSG_QUEUE_RESET_OFFSET 0xf40
+
+int icssg_queue_pop(struct prueth *prueth, u8 queue)
+{
+ u32 val, cnt;
+
+ if (queue >= ICSSG_QUEUES_MAX)
+ return -EINVAL;
+
+ regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &cnt);
+ if (!cnt)
+ return -EINVAL;
+
+ regmap_read(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, &val);
+
+ return val;
+}
+
+void icssg_queue_push(struct prueth *prueth, int queue, u16 addr)
+{
+ if (queue >= ICSSG_QUEUES_MAX)
+ return;
+
+ regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr);
+}
+
+u32 icssg_queue_level(struct prueth *prueth, int queue)
+{
+ u32 reg;
+
+ if (queue >= ICSSG_QUEUES_MAX)
+ return 0;
+
+ regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &reg);
+
+ return reg;
+}
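A minimal usage sketch of these queue helpers follows; the queue number is a placeholder, not a real firmware queue ID, and the flow is illustrative only.

/* Illustrative only: pop one buffer address from a hardware queue, use it,
 * then push it back so the firmware can reuse it.
 */
static void example_queue_recycle(struct prueth *prueth)
{
	int addr = icssg_queue_pop(prueth, 0);

	if (addr < 0)
		return;		/* queue empty or invalid queue number */

	/* ... hand the buffer at 'addr' to the firmware or the host ... */

	icssg_queue_push(prueth, 0, (u16)addr);
}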
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
new file mode 100644
index 000000000000..bb0b33927e3b
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include "icssg_prueth.h"
+#include "icssg_stats.h"
+#include <linux/regmap.h>
+
+static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
+ 0xb18, /* Slice 1 stats start */
+};
+
+void emac_update_hardware_stats(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ u32 base = stats_base[slice];
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
+ regmap_read(prueth->miig_rt,
+ base + icssg_all_stats[i].offset,
+ &val);
+ regmap_write(prueth->miig_rt,
+ base + icssg_all_stats[i].offset,
+ val);
+
+ emac->stats[i] += val;
+ }
+}
+
+void emac_stats_work_handler(struct work_struct *work)
+{
+ struct prueth_emac *emac = container_of(work, struct prueth_emac,
+ stats_work.work);
+ emac_update_hardware_stats(emac);
+
+ queue_delayed_work(system_long_wq, &emac->stats_work,
+ msecs_to_jiffies((STATS_TIME_LIMIT_1G_MS * 1000) / emac->speed));
+}
+
+int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
+ if (!strcmp(icssg_all_stats[i].name, stat_name))
+ return emac->stats[icssg_all_stats[i].offset / sizeof(u32)];
+ }
+
+ netdev_err(emac->ndev, "Invalid stats %s\n", stat_name);
+ return -EINVAL;
+}
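The delayed work above re-arms itself at an interval scaled by link speed: assuming emac->speed holds the link speed in Mb/s (SPEED_1000, SPEED_100, ...), the period works out to (25000 * 1000) / 1000 = 25000 ms (25 s) at 1 Gb/s and (25000 * 1000) / 100 = 250 s at 100 Mb/s, i.e. slower links are polled less often because their 32-bit hardware counters wrap more slowly.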
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
new file mode 100644
index 000000000000..999a4a91276c
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSSG_STATS_H
+#define __NET_TI_ICSSG_STATS_H
+
+#include "icssg_prueth.h"
+
+#define STATS_TIME_LIMIT_1G_MS 25000 /* 25 seconds @ 1G */
+
+struct miig_stats_regs {
+ /* Rx */
+ u32 rx_packets;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_crc_errors;
+ u32 rx_mii_error_frames;
+ u32 rx_odd_nibble_frames;
+ u32 rx_frame_max_size;
+ u32 rx_max_size_error_frames;
+ u32 rx_frame_min_size;
+ u32 rx_min_size_error_frames;
+ u32 rx_over_errors;
+ u32 rx_class0_hits;
+ u32 rx_class1_hits;
+ u32 rx_class2_hits;
+ u32 rx_class3_hits;
+ u32 rx_class4_hits;
+ u32 rx_class5_hits;
+ u32 rx_class6_hits;
+ u32 rx_class7_hits;
+ u32 rx_class8_hits;
+ u32 rx_class9_hits;
+ u32 rx_class10_hits;
+ u32 rx_class11_hits;
+ u32 rx_class12_hits;
+ u32 rx_class13_hits;
+ u32 rx_class14_hits;
+ u32 rx_class15_hits;
+ u32 rx_smd_frags;
+ u32 rx_bucket1_size;
+ u32 rx_bucket2_size;
+ u32 rx_bucket3_size;
+ u32 rx_bucket4_size;
+ u32 rx_64B_frames;
+ u32 rx_bucket1_frames;
+ u32 rx_bucket2_frames;
+ u32 rx_bucket3_frames;
+ u32 rx_bucket4_frames;
+ u32 rx_bucket5_frames;
+ u32 rx_bytes;
+ u32 rx_tx_total_bytes;
+ /* Tx */
+ u32 tx_packets;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_odd_nibble_frames;
+ u32 tx_underflow_errors;
+ u32 tx_frame_max_size;
+ u32 tx_max_size_error_frames;
+ u32 tx_frame_min_size;
+ u32 tx_min_size_error_frames;
+ u32 tx_bucket1_size;
+ u32 tx_bucket2_size;
+ u32 tx_bucket3_size;
+ u32 tx_bucket4_size;
+ u32 tx_64B_frames;
+ u32 tx_bucket1_frames;
+ u32 tx_bucket2_frames;
+ u32 tx_bucket3_frames;
+ u32 tx_bucket4_frames;
+ u32 tx_bucket5_frames;
+ u32 tx_bytes;
+};
+
+#define ICSSG_STATS(field, stats_type) \
+{ \
+ #field, \
+ offsetof(struct miig_stats_regs, field), \
+ stats_type \
+}
+
+struct icssg_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 offset;
+ bool standard_stats;
+};
+
+static const struct icssg_stats icssg_all_stats[] = {
+ /* Rx */
+ ICSSG_STATS(rx_packets, true),
+ ICSSG_STATS(rx_broadcast_frames, false),
+ ICSSG_STATS(rx_multicast_frames, true),
+ ICSSG_STATS(rx_crc_errors, true),
+ ICSSG_STATS(rx_mii_error_frames, false),
+ ICSSG_STATS(rx_odd_nibble_frames, false),
+ ICSSG_STATS(rx_frame_max_size, true),
+ ICSSG_STATS(rx_max_size_error_frames, false),
+ ICSSG_STATS(rx_frame_min_size, true),
+ ICSSG_STATS(rx_min_size_error_frames, false),
+ ICSSG_STATS(rx_over_errors, true),
+ ICSSG_STATS(rx_class0_hits, false),
+ ICSSG_STATS(rx_class1_hits, false),
+ ICSSG_STATS(rx_class2_hits, false),
+ ICSSG_STATS(rx_class3_hits, false),
+ ICSSG_STATS(rx_class4_hits, false),
+ ICSSG_STATS(rx_class5_hits, false),
+ ICSSG_STATS(rx_class6_hits, false),
+ ICSSG_STATS(rx_class7_hits, false),
+ ICSSG_STATS(rx_class8_hits, false),
+ ICSSG_STATS(rx_class9_hits, false),
+ ICSSG_STATS(rx_class10_hits, false),
+ ICSSG_STATS(rx_class11_hits, false),
+ ICSSG_STATS(rx_class12_hits, false),
+ ICSSG_STATS(rx_class13_hits, false),
+ ICSSG_STATS(rx_class14_hits, false),
+ ICSSG_STATS(rx_class15_hits, false),
+ ICSSG_STATS(rx_smd_frags, false),
+ ICSSG_STATS(rx_bucket1_size, true),
+ ICSSG_STATS(rx_bucket2_size, true),
+ ICSSG_STATS(rx_bucket3_size, true),
+ ICSSG_STATS(rx_bucket4_size, true),
+ ICSSG_STATS(rx_64B_frames, true),
+ ICSSG_STATS(rx_bucket1_frames, true),
+ ICSSG_STATS(rx_bucket2_frames, true),
+ ICSSG_STATS(rx_bucket3_frames, true),
+ ICSSG_STATS(rx_bucket4_frames, true),
+ ICSSG_STATS(rx_bucket5_frames, true),
+ ICSSG_STATS(rx_bytes, true),
+ ICSSG_STATS(rx_tx_total_bytes, false),
+ /* Tx */
+ ICSSG_STATS(tx_packets, true),
+ ICSSG_STATS(tx_broadcast_frames, false),
+ ICSSG_STATS(tx_multicast_frames, false),
+ ICSSG_STATS(tx_odd_nibble_frames, false),
+ ICSSG_STATS(tx_underflow_errors, false),
+ ICSSG_STATS(tx_frame_max_size, true),
+ ICSSG_STATS(tx_max_size_error_frames, false),
+ ICSSG_STATS(tx_frame_min_size, true),
+ ICSSG_STATS(tx_min_size_error_frames, false),
+ ICSSG_STATS(tx_bucket1_size, true),
+ ICSSG_STATS(tx_bucket2_size, true),
+ ICSSG_STATS(tx_bucket3_size, true),
+ ICSSG_STATS(tx_bucket4_size, true),
+ ICSSG_STATS(tx_64B_frames, true),
+ ICSSG_STATS(tx_bucket1_frames, true),
+ ICSSG_STATS(tx_bucket2_frames, true),
+ ICSSG_STATS(tx_bucket3_frames, true),
+ ICSSG_STATS(tx_bucket4_frames, true),
+ ICSSG_STATS(tx_bucket5_frames, true),
+ ICSSG_STATS(tx_bytes, true),
+};
+
+#endif /* __NET_TI_ICSSG_STATS_H */
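For reference, the ICSSG_STATS() initializer macro above stringifies the field name and records its register offset; the first table entry, ICSSG_STATS(rx_packets, true), expands to:

/* Expansion of ICSSG_STATS(rx_packets, true): */
{
	"rx_packets",
	offsetof(struct miig_stats_regs, rx_packets),
	true
}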
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
new file mode 100644
index 000000000000..424a7e945ea8
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Ethernet driver
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSSG_SWITCH_MAP_H
+#define __NET_TI_ICSSG_SWITCH_MAP_H
+
+/************************* Ethernet Switch Constants *********************/
+
+/* If the bucket size is changed in the firmware then this must be changed
+ * too, because it directly impacts the FDB ageing calculation.
+ */
+#define NUMBER_OF_FDB_BUCKET_ENTRIES (4)
+
+/* This is fixed in ICSSG */
+#define SIZE_OF_FDB (2048)
+
+#define FW_LINK_SPEED_1G (0x00)
+#define FW_LINK_SPEED_100M (0x01)
+#define FW_LINK_SPEED_10M (0x02)
+#define FW_LINK_SPEED_HD (0x80)
+
+/* Time after which FDB entries are checked for ageing.
+ * Value is in nanoseconds.
+ */
+#define FDB_AGEING_TIMEOUT_OFFSET 0x0014
+
+/* Default VLAN tag for Host Port */
+#define HOST_PORT_DF_VLAN_OFFSET 0x001C
+
+/* Same as HOST_PORT_DF_VLAN_OFFSET */
+#define EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET HOST_PORT_DF_VLAN_OFFSET
+
+/* Default VLAN tag for P1 Port */
+#define P1_PORT_DF_VLAN_OFFSET 0x0020
+
+/* Same as P1_PORT_DF_VLAN_OFFSET */
+#define EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET P1_PORT_DF_VLAN_OFFSET
+
+/* default VLAN tag for P2 Port */
+#define P2_PORT_DF_VLAN_OFFSET 0x0024
+
+/* Same as P2_PORT_DF_VLAN_OFFSET */
+#define EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET P2_PORT_DF_VLAN_OFFSET
+
+/* VLAN-FID Table offset. 4096 VIDs. 2B per VID = 8KB = 0x2000 */
+#define VLAN_STATIC_REG_TABLE_OFFSET 0x0100
+
+/* VLAN-FID Table offset for EMAC */
+#define EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET VLAN_STATIC_REG_TABLE_OFFSET
+
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC0_HI 0x2104
+
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC0_LO 0x2F6C
+
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC1_HI 0x3DD4
+
+/* Packet descriptor Q reserved memory */
+#define PORT_DESC1_LO 0x4C3C
+
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC0_HI 0x5AA4
+
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC0_LO 0x5F0C
+
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC1_HI 0x6374
+
+/* Packet descriptor Q reserved memory */
+#define HOST_DESC1_LO 0x67DC
+
+/* Special packet descriptor Q reserved memory */
+#define HOST_SPPD0 0x7AAC
+
+/* Special packet descriptor Q reserved memory */
+#define HOST_SPPD1 0x7EAC
+
+/* IEP count cycle counter */
+#define TIMESYNC_FW_WC_CYCLECOUNT_OFFSET 0x83EC
+
+/* IEP count hi roll over count */
+#define TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET 0x83F4
+
+/* IEP count hi sw counter */
+#define TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET 0x83F8
+
+/* Set clock descriptor */
+#define TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET 0x83FC
+
+/* IEP count syncout reduction factor */
+#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET 0x843C
+
+/* IEP count syncout reduction counter */
+#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_COUNT_OFFSET 0x8440
+
+/* IEP count syncout start time cycle counter */
+#define TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET 0x8444
+
+/* Control variable to generate SYNC1 */
+#define TIMESYNC_FW_WC_ISOM_PIN_SIGNAL_EN_OFFSET 0x844C
+
+/* SystemTime Sync0 periodicity */
+#define TIMESYNC_FW_ST_SYNCOUT_PERIOD_OFFSET 0x8450
+
+/* pktTxDelay for P1 = link speed dependent p1 mac delay + p1 phy delay */
+#define TIMESYNC_FW_WC_PKTTXDELAY_P1_OFFSET 0x8454
+
+/* pktTxDelay for P2 = link speed dependent p2 mac delay + p2 phy delay */
+#define TIMESYNC_FW_WC_PKTTXDELAY_P2_OFFSET 0x8458
+
+/* Set clock operation done signal for next task */
+#define TIMESYNC_FW_SIG_PNFW_OFFSET 0x845C
+
+/* Set clock operation done signal for next task */
+#define TIMESYNC_FW_SIG_TIMESYNCFW_OFFSET 0x8460
+
+/* New list is copied at this time */
+#define TAS_CONFIG_CHANGE_TIME 0x000C
+
+/* config change error counter */
+#define TAS_CONFIG_CHANGE_ERROR_COUNTER 0x0014
+
+/* TAS List update pending flag */
+#define TAS_CONFIG_PENDING 0x0018
+
+/* TAS list update trigger flag */
+#define TAS_CONFIG_CHANGE 0x0019
+
+/* List length for new TAS schedule */
+#define TAS_ADMIN_LIST_LENGTH 0x001A
+
+/* Currently active TAS list index */
+#define TAS_ACTIVE_LIST_INDEX 0x001B
+
+/* Cycle time for the new TAS schedule */
+#define TAS_ADMIN_CYCLE_TIME 0x001C
+
+/* Cycle counts remaining till the TAS list update */
+#define TAS_CONFIG_CHANGE_CYCLE_COUNT 0x0020
+
+/* Base Flow ID for sending Packets to Host for Slice0 */
+#define PSI_L_REGULAR_FLOW_ID_BASE_OFFSET 0x0024
+
+/* Same as PSI_L_REGULAR_FLOW_ID_BASE_OFFSET */
+#define EMAC_ICSSG_SWITCH_PSI_L_REGULAR_FLOW_ID_BASE_OFFSET PSI_L_REGULAR_FLOW_ID_BASE_OFFSET
+
+/* Base Flow ID for sending mgmt and Tx TS to Host for Slice0 */
+#define PSI_L_MGMT_FLOW_ID_OFFSET 0x0026
+
+/* Same as PSI_L_MGMT_FLOW_ID_OFFSET */
+#define EMAC_ICSSG_SWITCH_PSI_L_MGMT_FLOW_ID_BASE_OFFSET PSI_L_MGMT_FLOW_ID_OFFSET
+
+/* Queue number used for special packets is written here */
+#define SPL_PKT_DEFAULT_PRIORITY 0x0028
+
+/* Express Preemptible Queue Mask */
+#define EXPRESS_PRE_EMPTIVE_Q_MASK 0x0029
+
+/* Port1/Port2 Default Queue number for untagged Packets, only 1B is used */
+#define QUEUE_NUM_UNTAGGED 0x002A
+
+/* Stores the table used for priority regeneration. 1B per PCP/Queue */
+#define PORT_Q_PRIORITY_REGEN_OFFSET 0x002C
+
+/* For marking Packet as priority/express (this feature is disabled) or
+ * cut-through/S&F.
+ */
+#define EXPRESS_PRE_EMPTIVE_Q_MAP 0x0034
+
+/* Stores the table used for priority mapping. 1B per PCP/Queue */
+#define PORT_Q_PRIORITY_MAPPING_OFFSET 0x003C
+
+/* Used to notify the FW of the current link speed */
+#define PORT_LINK_SPEED_OFFSET 0x00A8
+
+/* TAS gate mask for windows list0 */
+#define TAS_GATE_MASK_LIST0 0x0100
+
+/* TAS gate mask for windows list1 */
+#define TAS_GATE_MASK_LIST1 0x0350
+
+/* Memory to Enable/Disable Preemption on TX side */
+#define PRE_EMPTION_ENABLE_TX 0x05A0
+
+/* Active State of Preemption on TX side */
+#define PRE_EMPTION_ACTIVE_TX 0x05A1
+
+/* Memory to Enable/Disable Verify State Machine Preemption */
+#define PRE_EMPTION_ENABLE_VERIFY 0x05A2
+
+/* Verify Status of State Machine */
+#define PRE_EMPTION_VERIFY_STATUS 0x05A3
+
+/* Non Final Fragment Size supported by Link Partner */
+#define PRE_EMPTION_ADD_FRAG_SIZE_REMOTE 0x05A4
+
+/* Non Final Fragment Size supported by Firmware */
+#define PRE_EMPTION_ADD_FRAG_SIZE_LOCAL 0x05A6
+
+/* Time in ms the state machine waits for a response packet */
+#define PRE_EMPTION_VERIFY_TIME 0x05A8
+
+/* Memory used for R30 related management commands */
+#define MGR_R30_CMD_OFFSET 0x05AC
+
+/* HW Buffer Pool0 base address */
+#define BUFFER_POOL_0_ADDR_OFFSET 0x05BC
+
+/* 16B for Host Egress MSMC Q (Pre-emptible) context */
+#define HOST_RX_Q_PRE_CONTEXT_OFFSET 0x0684
+
+/* Buffer for 8 FDB entries to be added by 'Add Multiple FDB entries IOCTL' */
+#define FDB_CMD_BUFFER 0x0894
+
+/* TAS queue max sdu length list */
+#define TAS_QUEUE_MAX_SDU_LIST 0x08FA
+
+/* Used by FW to generate random number with the SEED value */
+#define HD_RAND_SEED_OFFSET 0x0934
+
+/* 16B for Host Egress MSMC Q (Express) context */
+#define HOST_RX_Q_EXP_CONTEXT_OFFSET 0x0940
+
+/* Start of 32 bits PA_STAT counters */
+#define PA_STAT_32b_START_OFFSET 0x0080
+
+#endif /* __NET_TI_ICSSG_SWITCH_MAP_H */
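Assuming SIZE_OF_FDB counts entries rather than bytes, the table above works out to 2048 / 4 = 512 buckets of NUMBER_OF_FDB_BUCKET_ENTRIES entries each; this is the relationship that the ageing note at the top of this header refers to.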
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index 43d5cd59b56b..7007eb8bed36 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -233,8 +233,6 @@ int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
netcp_hook_rtn *hook_rtn, void *hook_data);
int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
netcp_hook_rtn *hook_rtn, void *hook_data);
-void *netcp_device_find_module(struct netcp_device *netcp_device,
- const char *name);
/* SGMII functions */
int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index b50be67b398b..14cf6ecf6d0d 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -667,8 +667,7 @@ static int tc_mii_init(struct net_device *dev)
lp->mii_bus->name = "tc35815_mii_bus";
lp->mii_bus->read = tc_mdio_read;
lp->mii_bus->write = tc_mdio_write;
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
- (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(lp->pci_dev));
lp->mii_bus->priv = dev;
lp->mii_bus->parent = &lp->pci_dev->dev;
err = mdiobus_register(lp->mii_bus);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index d716e6fe26e1..3e09e5036490 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,7 +94,7 @@ static const int multicast_filter_limit = 32;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 86f7843b4591..731f689412e6 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -57,8 +57,8 @@
#include <linux/if.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/inetdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 39596cd13539..23cd610bd376 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -41,6 +41,7 @@ config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
+ select MARVELL_10G_PHY
select REGMAP
select I2C
select I2C_DESIGNWARE_PLATFORM
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 6321178fc814..85dc16faca54 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -432,71 +432,6 @@ out:
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);
/**
- * wx_calculate_checksum - Calculate checksum for buffer
- * @buffer: pointer to EEPROM
- * @length: size of EEPROM to calculate a checksum for
- * Calculates the checksum for some buffer on a specified length. The
- * checksum calculated is returned.
- **/
-static u8 wx_calculate_checksum(u8 *buffer, u32 length)
-{
- u8 sum = 0;
- u32 i;
-
- if (!buffer)
- return 0;
-
- for (i = 0; i < length; i++)
- sum += buffer[i];
-
- return (u8)(0 - sum);
-}
-
-/**
- * wx_reset_hostif - send reset cmd to fw
- * @wx: pointer to hardware structure
- *
- * Sends reset cmd to firmware through the manageability
- * block.
- **/
-int wx_reset_hostif(struct wx *wx)
-{
- struct wx_hic_reset reset_cmd;
- int ret_val = 0;
- int i;
-
- reset_cmd.hdr.cmd = FW_RESET_CMD;
- reset_cmd.hdr.buf_len = FW_RESET_LEN;
- reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- reset_cmd.lan_id = wx->bus.func;
- reset_cmd.reset_type = (u16)wx->reset_type;
- reset_cmd.hdr.checksum = 0;
- reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd,
- (FW_CEM_HDR_LEN +
- reset_cmd.hdr.buf_len));
-
- for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd,
- sizeof(reset_cmd),
- WX_HI_COMMAND_TIMEOUT,
- true);
- if (ret_val != 0)
- continue;
-
- if (reset_cmd.hdr.cmd_or_resp.ret_status ==
- FW_CEM_RESP_STATUS_SUCCESS)
- ret_val = 0;
- else
- ret_val = -EFAULT;
-
- break;
- }
-
- return ret_val;
-}
-EXPORT_SYMBOL(wx_reset_hostif);
-
-/**
* wx_init_eeprom_params - Initialize EEPROM params
* @wx: pointer to hardware structure
*
@@ -1501,7 +1436,7 @@ static void wx_restore_vlan(struct wx *wx)
*
* Configure the Rx unit of the MAC after a reset.
**/
-static void wx_configure_rx(struct wx *wx)
+void wx_configure_rx(struct wx *wx)
{
u32 psrtype, i;
int ret;
@@ -1544,6 +1479,7 @@ static void wx_configure_rx(struct wx *wx)
wx_enable_rx(wx);
wx_enable_sec_rx_path(wx);
}
+EXPORT_SYMBOL(wx_configure_rx);
static void wx_configure_isb(struct wx *wx)
{
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 1f93ca32c921..0b3447bc6f2f 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -14,7 +14,6 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
-int wx_reset_hostif(struct wx *wx);
void wx_init_eeprom_params(struct wx *wx);
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
void wx_init_rx_addrs(struct wx *wx);
@@ -25,6 +24,7 @@ void wx_disable_rx(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_configure_rx(struct wx *wx);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2c3f08be8c37..e04d4a5eed7b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -3,7 +3,7 @@
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
#include <linux/iopoll.h>
#include <linux/sctp.h>
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 29dfb561887d..c5cbd177ef62 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -160,6 +160,10 @@
#define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16))
#define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16))
+#define WX_PSR_WKUP_CTL 0x15B80
+/* Wake Up Filter Control Bit */
+#define WX_PSR_WKUP_CTL_MAG BIT(1) /* Magic Packet Wakeup Enable */
+
/* vlan tbl */
#define WX_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4))
@@ -201,6 +205,8 @@
#define WX_TSC_CTL 0x1D000
#define WX_TSC_CTL_TX_DIS BIT(1)
#define WX_TSC_CTL_TSEC_DIS BIT(0)
+#define WX_TSC_ST 0x1D004
+#define WX_TSC_ST_SECTX_RDY BIT(0)
#define WX_TSC_BUF_AE 0x1D00C
#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
@@ -227,6 +233,24 @@
#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
+/* MDIO Registers */
+#define WX_MSCA 0x11200
+#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
+#define WX_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v)
+#define WX_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v)
+#define WX_MSCC 0x11204
+#define WX_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v)
+
+enum WX_MSCA_CMD_value {
+ WX_MSCA_CMD_RSV = 0,
+ WX_MSCA_CMD_WRITE,
+ WX_MSCA_CMD_POST_READ,
+ WX_MSCA_CMD_READ,
+};
+
+#define WX_MSCC_SADDR BIT(18)
+#define WX_MSCC_BUSY BIT(22)
+#define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v)
#define WX_MMC_CONTROL 0x11800
#define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */
@@ -576,6 +600,13 @@ enum wx_mac_type {
wx_mac_em
};
+enum sp_media_type {
+ sp_media_unknown = 0,
+ sp_media_fiber,
+ sp_media_copper,
+ sp_media_backplane
+};
+
enum em_mac_type {
em_mac_type_unknown = 0,
em_mac_type_mdi,
@@ -823,6 +854,7 @@ struct wx {
struct wx_bus_info bus;
struct wx_mac_info mac;
enum em_mac_type mac_type;
+ enum sp_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
struct wx_mac_addr *mac_table;
@@ -846,7 +878,7 @@ struct wx {
int duplex;
struct phy_device *phydev;
- bool wol_enabled;
+ bool wol_hw_supported;
bool ncsi_enabled;
bool gpio_ctrl;
raw_spinlock_t gpio_lock;
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 5b25834baf38..ec0e869e9aac 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -6,14 +6,49 @@
#include <linux/netdevice.h>
#include "../libwx/wx_ethtool.h"
+#include "../libwx/wx_type.h"
#include "ngbe_ethtool.h"
+static void ngbe_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!wx->wol_hw_supported)
+ return;
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+ if (wx->wol & WX_PSR_WKUP_CTL_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int ngbe_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct pci_dev *pdev = wx->pdev;
+
+ if (!wx->wol_hw_supported)
+ return -EOPNOTSUPP;
+
+ wx->wol = 0;
+ if (wol->wolopts & WAKE_MAGIC)
+ wx->wol = WX_PSR_WKUP_CTL_MAG;
+ netdev->wol_enabled = !!(wx->wol);
+ wr32(wx, WX_PSR_WKUP_CTL, wx->wol);
+ device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled);
+
+ return 0;
+}
+
static const struct ethtool_ops ngbe_ethtool_ops = {
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.nway_reset = phy_ethtool_nway_reset,
+ .get_wol = ngbe_get_wol,
+ .set_wol = ngbe_set_wol,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
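Once these ops are wired up, magic-packet wake can be exercised from user space with the standard ethtool interface, e.g. 'ethtool -s <iface> wol g' to enable it and 'ethtool <iface>' to read back the supported and currently active WoL modes.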
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index c99a5d3de72e..2b431db6085a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -62,7 +62,7 @@ static void ngbe_init_type_code(struct wx *wx)
em_mac_type_rgmii :
em_mac_type_mdi;
- wx->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
+ wx->wol_hw_supported = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0;
@@ -440,14 +440,26 @@ static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct wx *wx = pci_get_drvdata(pdev);
struct net_device *netdev;
+ u32 wufc = wx->wol;
netdev = wx->netdev;
+ rtnl_lock();
netif_device_detach(netdev);
- rtnl_lock();
if (netif_running(netdev))
- ngbe_down(wx);
+ ngbe_close(netdev);
+ wx_clear_interrupt_scheme(wx);
rtnl_unlock();
+
+ if (wufc) {
+ wx_set_rx_mode(netdev);
+ wx_configure_rx(wx);
+ wr32(wx, NGBE_PSR_WKUP_CTL, wufc);
+ } else {
+ wr32(wx, NGBE_PSR_WKUP_CTL, 0);
+ }
+ pci_wake_from_d3(pdev, !!wufc);
+ *enable_wake = !!wufc;
wx_control_hw(wx, false);
pci_disable_device(pdev);
@@ -621,12 +633,11 @@ static int ngbe_probe(struct pci_dev *pdev,
}
wx->wol = 0;
- if (wx->wol_enabled)
+ if (wx->wol_hw_supported)
wx->wol = NGBE_PSR_WKUP_CTL_MAG;
- wx->wol_enabled = !!(wx->wol);
+ netdev->wol_enabled = !!(wx->wol);
wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
-
device_set_wakeup_enable(&pdev->dev, wx->wol);
/* Save off EEPROM version number and Option Rom version which
@@ -712,11 +723,52 @@ static void ngbe_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static int ngbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ bool wake;
+
+ ngbe_dev_shutdown(pdev, &wake);
+ device_set_wakeup_enable(&pdev->dev, wake);
+
+ return 0;
+}
+
+static int ngbe_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev;
+ struct wx *wx;
+ u32 err;
+
+ wx = pci_get_drvdata(pdev);
+ netdev = wx->netdev;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ wx_err(wx, "Cannot enable PCI device from suspend\n");
+ return err;
+ }
+ pci_set_master(pdev);
+ device_wakeup_disable(&pdev->dev);
+
+ ngbe_reset_hw(wx);
+ rtnl_lock();
+ err = wx_init_interrupt_scheme(wx);
+ if (!err && netif_running(netdev))
+ err = ngbe_open(netdev);
+ if (!err)
+ netif_device_attach(netdev);
+ rtnl_unlock();
+
+ return 0;
+}
+
static struct pci_driver ngbe_driver = {
.name = ngbe_driver_name,
.id_table = ngbe_pci_tbl,
.probe = ngbe_probe,
.remove = ngbe_remove,
+ .suspend = ngbe_suspend,
+ .resume = ngbe_resume,
.shutdown = ngbe_shutdown,
};
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index c9ddbbc3fa4f..591f5b7b6da6 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -37,24 +37,24 @@ static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regn
wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
/* setup and write the address cycle command */
- command = NGBE_MSCA_RA(regnum) |
- NGBE_MSCA_PA(phy_addr) |
- NGBE_MSCA_DA(device_type);
- wr32(wx, NGBE_MSCA, command);
- command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) |
- NGBE_MSCC_BUSY |
- NGBE_MDIO_CLK(6);
- wr32(wx, NGBE_MSCC, command);
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(device_type);
+ wr32(wx, WX_MSCA, command);
+ command = WX_MSCC_CMD(WX_MSCA_CMD_READ) |
+ WX_MSCC_BUSY |
+ WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
/* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
- 100000, false, wx, NGBE_MSCC);
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
if (ret) {
wx_err(wx, "Mdio read c22 command did not complete.\n");
return ret;
}
- return (u16)rd32(wx, NGBE_MSCC);
+ return (u16)rd32(wx, WX_MSCC);
}
static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
@@ -65,19 +65,19 @@ static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int reg
wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
/* setup and write the address cycle command */
- command = NGBE_MSCA_RA(regnum) |
- NGBE_MSCA_PA(phy_addr) |
- NGBE_MSCA_DA(device_type);
- wr32(wx, NGBE_MSCA, command);
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(device_type);
+ wr32(wx, WX_MSCA, command);
command = value |
- NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) |
- NGBE_MSCC_BUSY |
- NGBE_MDIO_CLK(6);
- wr32(wx, NGBE_MSCC, command);
+ WX_MSCC_CMD(WX_MSCA_CMD_WRITE) |
+ WX_MSCC_BUSY |
+ WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
/* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
- 100000, false, wx, NGBE_MSCC);
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
if (ret)
wx_err(wx, "Mdio write c22 command did not complete.\n");
@@ -92,24 +92,24 @@ static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devn
wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
/* setup and write the address cycle command */
- command = NGBE_MSCA_RA(regnum) |
- NGBE_MSCA_PA(phy_addr) |
- NGBE_MSCA_DA(devnum);
- wr32(wx, NGBE_MSCA, command);
- command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) |
- NGBE_MSCC_BUSY |
- NGBE_MDIO_CLK(6);
- wr32(wx, NGBE_MSCC, command);
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
+ command = WX_MSCC_CMD(WX_MSCA_CMD_READ) |
+ WX_MSCC_BUSY |
+ WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
/* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
- 100000, false, wx, NGBE_MSCC);
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
if (ret) {
wx_err(wx, "Mdio read c45 command did not complete.\n");
return ret;
}
- return (u16)rd32(wx, NGBE_MSCC);
+ return (u16)rd32(wx, WX_MSCC);
}
static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
@@ -121,19 +121,19 @@ static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
/* setup and write the address cycle command */
- command = NGBE_MSCA_RA(regnum) |
- NGBE_MSCA_PA(phy_addr) |
- NGBE_MSCA_DA(devnum);
- wr32(wx, NGBE_MSCA, command);
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
command = value |
- NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) |
- NGBE_MSCC_BUSY |
- NGBE_MDIO_CLK(6);
- wr32(wx, NGBE_MSCC, command);
+ WX_MSCC_CMD(WX_MSCA_CMD_WRITE) |
+ WX_MSCC_BUSY |
+ WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
/* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
- 100000, false, wx, NGBE_MSCC);
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
if (ret)
wx_err(wx, "Mdio write c45 command did not complete.\n");
@@ -236,6 +236,7 @@ static void ngbe_phy_fixup(struct wx *wx)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phydev->mac_managed_pm = true;
if (wx->mac_type != em_mac_type_mdi)
return;
/* disable EEE, internal phy does not support eee */
@@ -265,8 +266,7 @@ int ngbe_mdio_init(struct wx *wx)
mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45;
}
- snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x",
- (pdev->bus->number << 8) | pdev->devfn);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev));
ret = devm_mdiobus_register(&pdev->dev, mii_bus);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index b70eca397b67..72c8cd2d5575 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -59,25 +59,6 @@
#define NGBE_EEPROM_VERSION_L 0x1D
#define NGBE_EEPROM_VERSION_H 0x1E
-/* mdio access */
-#define NGBE_MSCA 0x11200
-#define NGBE_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
-#define NGBE_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v)
-#define NGBE_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v)
-#define NGBE_MSCC 0x11204
-#define NGBE_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v)
-
-enum NGBE_MSCA_CMD_value {
- NGBE_MSCA_CMD_RSV = 0,
- NGBE_MSCA_CMD_WRITE,
- NGBE_MSCA_CMD_POST_READ,
- NGBE_MSCA_CMD_READ,
-};
-
-#define NGBE_MSCC_SADDR BIT(18)
-#define NGBE_MSCC_BUSY BIT(22)
-#define NGBE_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v)
-
/* Media-dependent registers. */
#define NGBE_MDIO_CLAUSE_SELECT 0x11220
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 0772eb14eabf..372745250270 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -14,6 +14,34 @@
#include "txgbe_hw.h"
/**
+ * txgbe_disable_sec_tx_path - Stops the transmit data path
+ * @wx: pointer to hardware structure
+ *
+ * Stops the transmit data path and waits for the HW to internally empty
+ * the tx security block
+ **/
+int txgbe_disable_sec_tx_path(struct wx *wx)
+{
+ int val;
+
+ wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, WX_TSC_CTL_TX_DIS);
+ return read_poll_timeout(rd32, val, val & WX_TSC_ST_SECTX_RDY,
+ 1000, 20000, false, wx, WX_TSC_ST);
+}
+
+/**
+ * txgbe_enable_sec_tx_path - Enables the transmit data path
+ * @wx: pointer to hardware structure
+ *
+ * Enables the transmit data path.
+ **/
+void txgbe_enable_sec_tx_path(struct wx *wx)
+{
+ wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS, 0);
+ WX_WRITE_FLUSH(wx);
+}
+
+/**
* txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
* @wx: pointer to hardware structure
*
@@ -263,11 +291,14 @@ int txgbe_reset_hw(struct wx *wx)
if (status != 0)
return status;
- if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP)))
- wx_reset_hostif(wx);
+ if (wx->media_type != sp_media_copper) {
+ u32 val;
- usleep_range(10, 100);
+ val = WX_MIS_RST_LAN_RST(wx->bus.func);
+ wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST));
+ WX_WRITE_FLUSH(wx);
+ usleep_range(10, 100);
+ }
status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func));
if (status != 0)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index e82f65dff8a6..abc729eb187a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -4,6 +4,8 @@
#ifndef _TXGBE_HW_H_
#define _TXGBE_HW_H_
+int txgbe_disable_sec_tx_path(struct wx *wx);
+void txgbe_enable_sec_tx_path(struct wx *wx);
int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size);
int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val);
int txgbe_reset_hw(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 46eba6d6188b..5c3aed516ac2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -301,6 +301,49 @@ static void txgbe_down(struct wx *wx)
}
/**
+ * txgbe_init_type_code - Initialize the shared code
+ * @wx: pointer to hardware structure
+ **/
+static void txgbe_init_type_code(struct wx *wx)
+{
+ u8 device_type = wx->subsystem_device_id & 0xF0;
+
+ switch (wx->device_id) {
+ case TXGBE_DEV_ID_SP1000:
+ case TXGBE_DEV_ID_WX1820:
+ wx->mac.type = wx_mac_sp;
+ break;
+ default:
+ wx->mac.type = wx_mac_unknown;
+ break;
+ }
+
+ switch (device_type) {
+ case TXGBE_ID_SFP:
+ wx->media_type = sp_media_fiber;
+ break;
+ case TXGBE_ID_XAUI:
+ case TXGBE_ID_SGMII:
+ wx->media_type = sp_media_copper;
+ break;
+ case TXGBE_ID_KR_KX_KX4:
+ case TXGBE_ID_MAC_XAUI:
+ case TXGBE_ID_MAC_SGMII:
+ wx->media_type = sp_media_backplane;
+ break;
+ case TXGBE_ID_SFI_XAUI:
+ if (wx->bus.func == 0)
+ wx->media_type = sp_media_fiber;
+ else
+ wx->media_type = sp_media_copper;
+ break;
+ default:
+ wx->media_type = sp_media_unknown;
+ break;
+ }
+}
+
+/**
* txgbe_sw_init - Initialize general software structures (struct wx)
* @wx: board private structure to initialize
**/
@@ -324,15 +367,7 @@ static int txgbe_sw_init(struct wx *wx)
return err;
}
- switch (wx->device_id) {
- case TXGBE_DEV_ID_SP1000:
- case TXGBE_DEV_ID_WX1820:
- wx->mac.type = wx_mac_sp;
- break;
- default:
- wx->mac.type = wx_mac_unknown;
- break;
- }
+ txgbe_init_type_code(wx);
/* Set common capability flags and settings */
wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
@@ -663,6 +698,9 @@ static int txgbe_probe(struct pci_dev *pdev,
"0x%08x", etrack_id);
}
+ if (etrack_id < 0x20010)
+ dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
+
txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
if (!txgbe) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 8779645a54be..4159c84035fd 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -18,6 +18,7 @@
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
+#include "txgbe_hw.h"
static int txgbe_swnodes_register(struct txgbe *txgbe)
{
@@ -26,7 +27,7 @@ static int txgbe_swnodes_register(struct txgbe *txgbe)
struct software_node *swnodes;
u32 id;
- id = (pdev->bus->number << 8) | pdev->devfn;
+ id = pci_dev_id(pdev);
snprintf(nodes->gpio_name, sizeof(nodes->gpio_name), "txgbe_gpio-%x", id);
snprintf(nodes->i2c_name, sizeof(nodes->i2c_name), "txgbe_i2c-%x", id);
@@ -140,7 +141,7 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
mii_bus->phy_mask = ~0;
mii_bus->priv = wx;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe_pcs-%x",
- (pdev->bus->number << 8) | pdev->devfn);
+ pci_dev_id(pdev));
ret = devm_mdiobus_register(&pdev->dev, mii_bus);
if (ret)
@@ -160,7 +161,10 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
{
struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev));
- return &txgbe->xpcs->pcs;
+ if (interface == PHY_INTERFACE_MODE_10GBASER)
+ return &txgbe->xpcs->pcs;
+
+ return NULL;
}
static void txgbe_mac_config(struct phylink_config *config, unsigned int mode,
@@ -210,8 +214,32 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
}
+static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct wx *wx = netdev_priv(to_net_dev(config->dev));
+
+ wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
+
+ return txgbe_disable_sec_tx_path(wx);
+}
+
+static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct wx *wx = netdev_priv(to_net_dev(config->dev));
+
+ txgbe_enable_sec_tx_path(wx);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+
+ return 0;
+}
+
static const struct phylink_mac_ops txgbe_mac_ops = {
.mac_select_pcs = txgbe_phylink_mac_select,
+ .mac_prepare = txgbe_mac_prepare,
+ .mac_finish = txgbe_mac_finish,
.mac_config = txgbe_mac_config,
.mac_link_down = txgbe_mac_link_down,
.mac_link_up = txgbe_mac_link_up,
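phylink calls mac_prepare() before a major reconfiguration (such as an interface-mode change) and mac_finish() once it completes, so the two callbacks added here quiesce the MAC and the TX security block around the change and re-enable them afterwards.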
@@ -219,8 +247,8 @@ static const struct phylink_mac_ops txgbe_mac_ops = {
static int txgbe_phylink_init(struct txgbe *txgbe)
{
+ struct fwnode_handle *fwnode = NULL;
struct phylink_config *config;
- struct fwnode_handle *fwnode;
struct wx *wx = txgbe->wx;
phy_interface_t phy_mode;
struct phylink *phylink;
@@ -231,14 +259,34 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
config->dev = &wx->netdev->dev;
config->type = PHYLINK_NETDEV;
- config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
- phy_mode = PHY_INTERFACE_MODE_10GBASER;
- __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
- fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]);
+ config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+
+ if (wx->media_type == sp_media_copper) {
+ phy_mode = PHY_INTERFACE_MODE_XAUI;
+ __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
+ } else {
+ phy_mode = PHY_INTERFACE_MODE_10GBASER;
+ fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_PHYLINK]);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
+ }
+
phylink = phylink_create(config, fwnode, phy_mode, &txgbe_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
+ if (wx->phydev) {
+ int ret;
+
+ ret = phylink_connect_phy(phylink, wx->phydev);
+ if (ret) {
+ phylink_destroy(phylink);
+ return ret;
+ }
+ }
+
txgbe->phylink = phylink;
return 0;
@@ -431,7 +479,8 @@ static void txgbe_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
- if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN)) {
+ if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ TXGBE_PX_MISC_ETH_AN)) {
u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
@@ -459,7 +508,7 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
return -ENOMEM;
gc->label = devm_kasprintf(dev, GFP_KERNEL, "txgbe_gpio-%x",
- (wx->pdev->bus->number << 8) | wx->pdev->devfn);
+ pci_dev_id(wx->pdev));
if (!gc->label)
return -ENOMEM;
@@ -503,7 +552,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
struct clk *clk;
snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
- (pdev->bus->number << 8) | pdev->devfn);
+ pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -566,7 +615,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
info.name = "i2c_designware";
- info.id = (pdev->bus->number << 8) | pdev->devfn;
+ info.id = pci_dev_id(pdev);
info.res = &DEFINE_RES_IRQ(pdev->irq);
info.num_res = 1;
@@ -588,7 +637,7 @@ static int txgbe_sfp_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_SFP]);
info.name = "sfp";
- info.id = (pdev->bus->number << 8) | pdev->devfn;
+ info.id = pci_dev_id(pdev);
sfp_dev = platform_device_register_full(&info);
if (IS_ERR(sfp_dev))
return PTR_ERR(sfp_dev);
@@ -598,10 +647,117 @@ static int txgbe_sfp_register(struct txgbe *txgbe)
return 0;
}
+static int txgbe_phy_read(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum)
+{
+ struct wx *wx = bus->priv;
+ u32 val, command;
+ int ret;
+
+ /* setup and write the address cycle command */
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
+
+ command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
+ wr32(wx, WX_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
+ if (ret) {
+ wx_err(wx, "Mdio read c45 command did not complete.\n");
+ return ret;
+ }
+
+ return (u16)rd32(wx, WX_MSCC);
+}
+
+static int txgbe_phy_write(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+ int ret, command;
+ u16 val;
+
+ /* setup and write the address cycle command */
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
+
+ command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
+ wr32(wx, WX_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
+ if (ret)
+ wx_err(wx, "Mdio write c45 command did not complete.\n");
+
+ return ret;
+}
+
+static int txgbe_ext_phy_init(struct txgbe *txgbe)
+{
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ struct pci_dev *pdev;
+ struct wx *wx;
+ int ret = 0;
+
+ wx = txgbe->wx;
+ pdev = wx->pdev;
+
+ mii_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "txgbe_mii_bus";
+ mii_bus->read_c45 = &txgbe_phy_read;
+ mii_bus->write_c45 = &txgbe_phy_write;
+ mii_bus->parent = &pdev->dev;
+ mii_bus->phy_mask = GENMASK(31, 1);
+ mii_bus->priv = wx;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x",
+ (pdev->bus->number << 8) | pdev->devfn);
+
+ ret = devm_mdiobus_register(&pdev->dev, mii_bus);
+ if (ret) {
+ wx_err(wx, "failed to register MDIO bus: %d\n", ret);
+ return ret;
+ }
+
+ phydev = phy_find_first(mii_bus);
+ if (!phydev) {
+ wx_err(wx, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ phy_attached_info(phydev);
+
+ wx->link = 0;
+ wx->speed = 0;
+ wx->duplex = 0;
+ wx->phydev = phydev;
+
+ ret = txgbe_phylink_init(txgbe);
+ if (ret) {
+ wx_err(wx, "failed to init phylink: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int txgbe_init_phy(struct txgbe *txgbe)
{
int ret;
+ if (txgbe->wx->media_type == sp_media_copper)
+ return txgbe_ext_phy_init(txgbe);
+
ret = txgbe_swnodes_register(txgbe);
if (ret) {
wx_err(txgbe->wx, "failed to register software nodes\n");
@@ -663,6 +819,12 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
+ if (txgbe->wx->media_type == sp_media_copper) {
+ phylink_disconnect_phy(txgbe->phylink);
+ phylink_destroy(txgbe->phylink);
+ return;
+ }
+
platform_device_unregister(txgbe->sfp_dev);
platform_device_unregister(txgbe->i2c_dev);
clkdev_drop(txgbe->clock);
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 6668d1b760d8..90d122d5475c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#ifdef CONFIG_PPC_DCR
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 49f303353ecb..1444b855e7aa 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -35,12 +35,10 @@
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 2371c072b53f..07a9fb49eda1 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -10,8 +10,8 @@
#include <linux/mutex.h>
#include <linux/phy.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/xilinx-ll-temac.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 8e32dc50a408..b7ec4dafae90 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -27,11 +27,12 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index ad2c30d9a482..b358ecc67227 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -15,9 +16,8 @@
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>