Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 40
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/8390/mac8390.c | 32
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 3
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 31
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 8
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 4
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 1
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 85
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 18
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 107
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 16
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 18
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 38
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 17
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 362
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 19
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 2
-rw-r--r--  drivers/net/ethernet/apple/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 1
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 10
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 1
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 29
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 81
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 64
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 102
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 71
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 49
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 204
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 254
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 595
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 79
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 337
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 77
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 358
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 212
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 37
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5728
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1086
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 1149
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 104
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 4046
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 816
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 23
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 36
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 21
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 282
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 117
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 13
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 31
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 93
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 240
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 182
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 543
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 144
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 44
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 169
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 42
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 799
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 220
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 126
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 94
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 61
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 1790
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 197
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 33
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 49
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_clsf.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 113
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 264
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 281
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 46
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 28
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_intr.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_resource.h | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c | 33
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h | 18
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 1
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 9
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 100
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 26
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 298
-rw-r--r--  drivers/net/ethernet/ethoc.c | 21
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 37
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.h | 20
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 64
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 34
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 687
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 89
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 398
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 35
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_mdio.c | 186
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/Makefile | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 457
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 585
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 783
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 704
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h | 45
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 902
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 456
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 2474
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 428
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 310
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h | 43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 583
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 105
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 1021
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 139
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 972
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 837
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 1642
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.h | 84
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 1214
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 521
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 1
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.h | 8
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 145
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 18
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 92
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 19
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 11
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 220
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 8
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 52
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 48
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 39
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 198
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 105
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 7
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 142
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 92
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 97
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 512
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 296
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 47
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 176
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 55
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 557
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 67
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1921
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 557
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 1938
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 583
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 105
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 191
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 19
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 172
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 87
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 9
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 71
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 407
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h | 55
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 15
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_register.h | 3155
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 398
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 85
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 179
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 19
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 71
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 533
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 107
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 38
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 109
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 173
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 80
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 293
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 241
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 67
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 413
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 366
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 96
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 86
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 107
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 685
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 139
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 41
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 1
-rw-r--r--  drivers/net/ethernet/jme.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 99
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 448
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 244
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/Makefile | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 213
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 98
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 194
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 178
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 264
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c | 387
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 987
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 224
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 607
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h | 1115
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 1299
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 212
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/emad.h | 127
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h | 441
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 1847
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.h | 226
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/port.h | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 2460
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1949
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 122
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 422
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 903
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1557
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/txheader.h | 81
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 5
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/microchip/Makefile | 1
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 1
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c | 513
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c | 1129
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600_hw.h | 437
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 1
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 28
-rw-r--r--  drivers/net/ethernet/neterion/s2io.h | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 24
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 13
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/qlogic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 496
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 847
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 139
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 1797
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 283
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 5291
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 776
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h | 263
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 798
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 531
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 110
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 1134
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 391
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 1704
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 1169
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 860
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 369
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 366
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 360
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 170
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 860
-rw-r--r--  drivers/net/ethernet/qlogic/qede/Makefile | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 285
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 385
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 2584
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 37
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 133
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 179
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 12
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 213
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 40
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 674
-rw-r--r--  drivers/net/ethernet/rocker/rocker.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 619
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 61
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 43
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 3429
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 135
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 30
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 14
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 30
-rw-r--r--  drivers/net/ethernet/smsc/smc9194.c | 32
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 29
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 99
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 66
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 42
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 54
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 59
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 31
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 73
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 78
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 83
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 95
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 138
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 9
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 3
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 17
-rw-r--r--  drivers/net/ethernet/synopsys/Kconfig | 27
-rw-r--r--  drivers/net/ethernet/synopsys/Makefile | 5
-rw-r--r--  drivers/net/ethernet/synopsys/dwc_eth_qos.c | 3019
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 5
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 64
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 10
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 204
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 3
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 56
-rw-r--r--  drivers/net/ethernet/ti/netcp.h | 1
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 125
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 456
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 1
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 4
-rw-r--r--  drivers/net/ethernet/via/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 3
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_mdio.c | 9
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
478 files changed, 95615 insertions, 13341 deletions
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2d1ce3c5d0dd..2839af00f20c 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1726,6 +1726,7 @@ vortex_up(struct net_device *dev)
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
iowrite32(0x8000, vp->cb_fn_base + 4);
netif_start_queue (dev);
+ netdev_reset_queue(dev);
err_out:
return err;
}
@@ -1763,16 +1764,9 @@ vortex_open(struct net_device *dev)
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
- int j;
pr_emerg("%s: no memory for rx ring\n", dev->name);
- for (j = 0; j < i; j++) {
- if (vp->rx_skbuff[j]) {
- dev_kfree_skb(vp->rx_skbuff[j]);
- vp->rx_skbuff[j] = NULL;
- }
- }
retval = -ENOMEM;
- goto err_free_irq;
+ goto err_free_skb;
}
/* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1776,13 @@ vortex_open(struct net_device *dev)
if (!retval)
goto out;
-err_free_irq:
+err_free_skb:
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
free_irq(dev->irq, dev);
err:
if (vortex_debug > 1)
@@ -1936,16 +1936,18 @@ static void vortex_tx_timeout(struct net_device *dev)
if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
ioaddr + DownListPtr);
- if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+ if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
netif_wake_queue (dev);
+ netdev_reset_queue (dev);
+ }
if (vp->drv_flags & IS_BOOMERANG)
iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
} else {
dev->stats.tx_dropped++;
netif_wake_queue(dev);
+ netdev_reset_queue(dev);
}
-
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -2064,6 +2066,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
+ int skblen = skb->len;
/* Put out the doubleword header... */
iowrite32(skb->len, ioaddr + TX_FIFO);
@@ -2095,6 +2098,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ netdev_sent_queue(dev, skblen);
/* Clear the Tx status stack. */
{
@@ -2126,6 +2130,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
void __iomem *ioaddr = vp->ioaddr;
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
+ int skblen = skb->len;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
unsigned long flags;
dma_addr_t dma_addr;
@@ -2231,6 +2236,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
vp->cur_tx++;
+ netdev_sent_queue(dev, skblen);
+
if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
netif_stop_queue (dev);
} else { /* Clear previous interrupt enable. */
@@ -2268,6 +2275,7 @@ vortex_interrupt(int irq, void *dev_id)
int status;
int work_done = max_interrupt_work;
int handled = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
spin_lock(&vp->lock);
@@ -2315,6 +2323,8 @@ vortex_interrupt(int irq, void *dev_id)
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+ pkts_compl++;
+ bytes_compl += vp->tx_skb->len;
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
if (ioread16(ioaddr + TxFree) > 1536) {
/*
@@ -2359,6 +2369,7 @@ vortex_interrupt(int irq, void *dev_id)
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
spin_unlock(&vp->window_lock);
if (vortex_debug > 4)
@@ -2383,6 +2394,7 @@ boomerang_interrupt(int irq, void *dev_id)
int status;
int work_done = max_interrupt_work;
int handled = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
ioaddr = vp->ioaddr;
@@ -2456,6 +2468,8 @@ boomerang_interrupt(int irq, void *dev_id)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
+ pkts_compl++;
+ bytes_compl += skb->len;
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
@@ -2496,6 +2510,7 @@ boomerang_interrupt(int irq, void *dev_id)
iowrite32(0x8000, vp->cb_fn_base + 4);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
if (vortex_debug > 4)
pr_debug("%s: exiting interrupt, status %4.4x.\n",
@@ -2697,7 +2712,8 @@ vortex_down(struct net_device *dev, int final_down)
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
- netif_stop_queue (dev);
+ netdev_reset_queue(dev);
+ netif_stop_queue(dev);
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
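The 3c59x hunks above wire the driver up for byte queue limits (BQL): netdev_sent_queue() on both transmit paths, netdev_completed_queue() from both interrupt handlers, and netdev_reset_queue() wherever the queue is restarted or torn down. A minimal sketch of that calling pattern, with hypothetical foo_* names standing in for a driver's own handlers:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		unsigned int len = skb->len;	/* sample before skb may be freed */

		/* ... post skb to the hardware TX ring ... */
		netdev_sent_queue(dev, len);	/* bytes now in flight */
		return NETDEV_TX_OK;
	}

	static void foo_tx_clean(struct net_device *dev)
	{
		unsigned int pkts = 0, bytes = 0;

		/* ... for each completed descriptor: pkts++; bytes += skb->len; ... */
		netdev_completed_queue(dev, pkts, bytes);	/* may re-wake queue */
	}

	static void foo_down(struct net_device *dev)
	{
		/* ... free any pending TX skbs ... */
		netdev_reset_queue(dev);	/* keep BQL accounting consistent */
	}

Skipping the reset after a ring flush leaves BQL believing bytes are still in flight, which can stall the queue indefinitely; that is why vortex_down() and the timeout paths above reset it.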
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index edf72258ab1d..29c3075bfb05 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -64,7 +64,7 @@ config ARM_ETHERH
should say Y to this option if you wish to use it with Linux.
config MAC8390
- bool "Macintosh NS 8390 based ethernet cards"
+ tristate "Macintosh NS 8390 based ethernet cards"
depends on MAC
select CRC32
---help---
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 65cf60f6718c..b9283901136e 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -454,34 +454,22 @@ MODULE_AUTHOR("David Huggins-Daines <dhd@debian.org> and others");
MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
MODULE_LICENSE("GPL");
-/* overkill, of course */
-static struct net_device *dev_mac8390[15];
-int init_module(void)
+static struct net_device *dev_mac8390;
+
+int __init init_module(void)
{
- int i;
- for (i = 0; i < 15; i++) {
- struct net_device *dev = mac8390_probe(-1);
- if (IS_ERR(dev))
- break;
- dev_mac890[i] = dev;
- }
- if (!i) {
- pr_notice("No useable cards found, driver NOT installed.\n");
- return -ENODEV;
+ dev_mac8390 = mac8390_probe(-1);
+ if (IS_ERR(dev_mac8390)) {
+ pr_warn("mac8390: No card found\n");
+ return PTR_ERR(dev_mac8390);
}
return 0;
}
-void cleanup_module(void)
+void __exit cleanup_module(void)
{
- int i;
- for (i = 0; i < 15; i++) {
- struct net_device *dev = dev_mac890[i];
- if (dev) {
- unregister_netdev(dev);
- free_netdev(dev);
- }
- }
+ unregister_netdev(dev_mac8390);
+ free_netdev(dev_mac8390);
}
#endif /* MODULE */
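The rewrite above keeps the legacy init_module()/cleanup_module() entry points inside the #ifdef MODULE block. For reference, the same structure in the more common module_init()/module_exit() spelling (a sketch, not the form this patch chose):

	#include <linux/module.h>
	#include <linux/err.h>
	#include <linux/netdevice.h>

	static struct net_device *dev_mac8390;

	static int __init mac8390_init_module(void)
	{
		dev_mac8390 = mac8390_probe(-1);
		return PTR_ERR_OR_ZERO(dev_mac8390);	/* 0 on success */
	}

	static void __exit mac8390_cleanup_module(void)
	{
		unregister_netdev(dev_mac8390);
		free_netdev(dev_mac8390);
	}

	module_init(mac8390_init_module);
	module_exit(mac8390_cleanup_module);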
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f3bb1784066b..05aa7597dab9 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -167,6 +167,7 @@ source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c51014b0464f..ddfc808110a1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -65,7 +65,7 @@ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
-obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
@@ -77,6 +77,7 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index ae89de7deb13..20bf55dbd76f 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1141,8 +1141,6 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
strlcpy(info->version, "revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- info->eedump_len = 0;
- info->regdump_len = sizeof(struct greth_regs);
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index bab01c849165..8d50314ac3eb 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -28,6 +28,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
#include "sun4i-emac.h"
@@ -846,22 +847,32 @@ static int emac_probe(struct platform_device *pdev)
if (ndev->irq == -ENXIO) {
netdev_err(ndev, "No irq resource\n");
ret = ndev->irq;
- goto out;
+ goto out_iounmap;
}
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
- goto out;
+ goto out_iounmap;
+ }
+
+ ret = clk_prepare_enable(db->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
+ goto out_iounmap;
}
- clk_prepare_enable(db->clk);
+ ret = sunxi_sram_claim(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
+ goto out_clk_disable_unprepare;
+ }
db->phy_node = of_parse_phandle(np, "phy", 0);
if (!db->phy_node) {
dev_err(&pdev->dev, "no associated PHY\n");
ret = -ENODEV;
- goto out;
+ goto out_release_sram;
}
/* Read MAC-address from DT */
@@ -893,7 +904,7 @@ static int emac_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Registering netdev failed!\n");
ret = -ENODEV;
- goto out;
+ goto out_release_sram;
}
dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n",
@@ -901,6 +912,12 @@ static int emac_probe(struct platform_device *pdev)
return 0;
+out_release_sram:
+ sunxi_sram_release(&pdev->dev);
+out_clk_disable_unprepare:
+ clk_disable_unprepare(db->clk);
+out_iounmap:
+ iounmap(db->membase);
out:
dev_err(db->dev, "not found (%d).\n", ret);
@@ -912,8 +929,12 @@ out:
static int emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_board_info *db = netdev_priv(ndev);
unregister_netdev(ndev);
+ sunxi_sram_release(&pdev->dev);
+ clk_disable_unprepare(db->clk);
+ iounmap(db->membase);
free_netdev(ndev);
dev_dbg(&pdev->dev, "released and freed device\n");
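The sun4i-emac changes replace a single catch-all "out" label with a reverse-order unwind ladder, so every resource acquired in probe is released exactly once on any failure path, and emac_remove() mirrors the same releases. The idiom in condensed form (a sketch; the ioremap/clk/netdev setup above the first goto is elided):

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* base = ioremap(...); clk = devm_clk_get(...); ndev = ...; */

		ret = clk_prepare_enable(clk);
		if (ret)
			goto out_iounmap;

		ret = sunxi_sram_claim(&pdev->dev);
		if (ret)
			goto out_clk_disable;

		ret = register_netdev(ndev);
		if (ret)
			goto out_release_sram;

		return 0;

	out_release_sram:
		sunxi_sram_release(&pdev->dev);
	out_clk_disable:
		clk_disable_unprepare(clk);
	out_iounmap:
		iounmap(base);
		return ret;
	}

Each label undoes only the steps that succeeded before the jump, which is what the old single "out" label could not express.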
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 580553d42d34..88ef67a998b4 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -71,8 +71,6 @@ int sgdma_initialize(struct altera_tse_private *priv)
SGDMA_CTRLREG_INTEN |
SGDMA_CTRLREG_ILASTD;
- priv->sgdmadesclen = sizeof(struct sgdma_descrip);
-
INIT_LIST_HEAD(&priv->txlisthd);
INIT_LIST_HEAD(&priv->rxlisthd);
@@ -254,7 +252,7 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
unsigned int pktstatus = 0;
dma_sync_single_for_cpu(priv->device,
priv->rxdescphys,
- priv->sgdmadesclen,
+ SGDMA_DESC_LEN,
DMA_FROM_DEVICE);
pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
@@ -374,7 +372,7 @@ static int sgdma_async_read(struct altera_tse_private *priv)
dma_sync_single_for_device(priv->device,
priv->rxdescphys,
- priv->sgdmadesclen,
+ SGDMA_DESC_LEN,
DMA_TO_DEVICE);
csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
@@ -402,7 +400,7 @@ static int sgdma_async_write(struct altera_tse_private *priv,
csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
dma_sync_single_for_device(priv->device, priv->txdescphys,
- priv->sgdmadesclen, DMA_TO_DEVICE);
+ SGDMA_DESC_LEN, DMA_TO_DEVICE);
csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
priv->tx_dma_csr,
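The altera_sgdma changes swap the cached priv->sgdmadesclen for the compile-time SGDMA_DESC_LEN constant; the sync calls themselves follow the standard streaming-DMA ownership handoff. The rule, in sketch form with generic names:

	/* before the CPU reads a descriptor the device may have written: */
	dma_sync_single_for_cpu(dev, desc_dma, SGDMA_DESC_LEN,
				DMA_FROM_DEVICE);
	/* descriptor fields are now coherent for CPU reads */

	/* after the CPU writes a descriptor the device will consume: */
	dma_sync_single_for_device(dev, desc_dma, SGDMA_DESC_LEN,
				   DMA_TO_DEVICE);
	/* ownership is back with the device; hands off until completion */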
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index 85bc33b218d9..bbd52f02330b 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -50,6 +50,7 @@ struct sgdma_descrip {
u8 control;
} __packed;
+#define SGDMA_DESC_LEN sizeof(struct sgdma_descrip)
#define SGDMA_STATUS_ERR BIT(0)
#define SGDMA_STATUS_LENGTH_ERR BIT(1)
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 2adb24d4523c..103c30ddddf7 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -458,7 +458,6 @@ struct altera_tse_private {
u32 rxctrlreg;
dma_addr_t rxdescphys;
dma_addr_t txdescphys;
- size_t sgdmadesclen;
struct list_head txlisthd;
struct list_head rxlisthd;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index da48e66377b5..fe644823ceaf 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_complete(napi);
netdev_dbg(priv->dev,
"NAPI Complete, did %d packets with budget %d\n",
@@ -1518,6 +1517,7 @@ static int altera_tse_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rxdma_irq_lock);
+ netif_carrier_off(ndev);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "failed to register TSE net device\n");
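Two small fixes above: tse_poll()'s open-coded napi_gro_flush() + __napi_complete() pair collapses into napi_complete(), which performs both steps itself, and netif_carrier_off() before register_netdev() avoids reporting carrier before the PHY says so. The canonical poll shape after such a cleanup, with hypothetical foo_* helpers:

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		int done = foo_clean_rx(napi, budget);	/* hypothetical RX cleaner */

		if (done < budget) {
			napi_complete(napi);	/* flushes GRO, exits polled state */
			foo_enable_rx_irq();	/* re-arm interrupts only when done */
		}
		return done;
	}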
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 98a10d555b79..66d0b73c39c0 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -661,6 +661,7 @@ void lance_poll(struct net_device *dev)
spin_unlock(&lp->devlock);
lance_interrupt(dev->irq, dev);
}
+EXPORT_SYMBOL_GPL(lance_poll);
#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index afc62ea804fc..0038709fd317 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -100,7 +100,7 @@ config DECLANCE
DEPCA series. (This chipset is better known via the NE2100 cards.)
config HPLANCE
- bool "HP on-board LANCE support"
+ tristate "HP on-board LANCE support"
depends on DIO
select CRC32
---help---
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index cb367cc59e0b..5330bcb8a944 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -714,7 +714,6 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
- info->regdump_len = 0;
}
static void au1000_set_msglevel(struct net_device *dev, u32 value)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 2c063b60db4b..96f485ab612e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
pdata->debugfs_xpcs_reg = 0;
buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+ if (!buf)
+ return;
+
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
if (!pdata->xgbe_debugfs) {
netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+ kfree(buf);
return;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index a4473d8ff4fa..970781a9e677 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1595,7 +1595,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
- dma_wmb();
+ smp_wmb();
ring->cur = cur_index + 1;
if (!packet->skb->xmit_more ||
@@ -1940,84 +1940,31 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
unsigned int queue_count)
{
- unsigned int q_fifo_size = 0;
- enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo;
- /* Calculate Tx/Rx fifo share per queue */
- switch (fifo_size) {
- case 0:
- q_fifo_size = XGBE_FIFO_SIZE_B(128);
- break;
- case 1:
- q_fifo_size = XGBE_FIFO_SIZE_B(256);
- break;
- case 2:
- q_fifo_size = XGBE_FIFO_SIZE_B(512);
- break;
- case 3:
- q_fifo_size = XGBE_FIFO_SIZE_KB(1);
- break;
- case 4:
- q_fifo_size = XGBE_FIFO_SIZE_KB(2);
- break;
- case 5:
- q_fifo_size = XGBE_FIFO_SIZE_KB(4);
- break;
- case 6:
- q_fifo_size = XGBE_FIFO_SIZE_KB(8);
- break;
- case 7:
- q_fifo_size = XGBE_FIFO_SIZE_KB(16);
- break;
- case 8:
- q_fifo_size = XGBE_FIFO_SIZE_KB(32);
- break;
- case 9:
- q_fifo_size = XGBE_FIFO_SIZE_KB(64);
- break;
- case 10:
- q_fifo_size = XGBE_FIFO_SIZE_KB(128);
- break;
- case 11:
- q_fifo_size = XGBE_FIFO_SIZE_KB(256);
- break;
- }
+ /* Calculate the configured fifo size */
+ q_fifo_size = 1 << (fifo_size + 7);
- /* The configured value is not the actual amount of fifo RAM */
+ /* The configured value may not be the actual amount of fifo RAM */
q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
q_fifo_size = q_fifo_size / queue_count;
- /* Set the queue fifo size programmable value */
- if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
- p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
- p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
- p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
- p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
- p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
- p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
- p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
- p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
- p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
- p_fifo = XGMAC_MTL_FIFO_SIZE_512;
- else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
- p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+ /* Each increment in the queue fifo size represents 256 bytes of
+ * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+ * between the queues.
+ */
+ p_fifo = q_fifo_size / 256;
+ if (p_fifo)
+ p_fifo--;
return p_fifo;
}
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
- enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int fifo_size;
unsigned int i;
fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
@@ -2033,7 +1980,7 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
- enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int fifo_size;
unsigned int i;
fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
@@ -2224,7 +2171,7 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
default:
read_hi = false;
- };
+ }
val = XGMAC_IOREAD(pdata, reg_lo);
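The per-queue FIFO rework above folds two lookup tables into closed-form arithmetic: the hardware encodes the total FIFO as 1 << (fifo_size + 7) bytes, and the MTL register counts 256-byte units minus one. A quick check of one old table entry against the new formula (a sketch, with queue_count factored out as 1):

	static unsigned int fifo_encode(unsigned int fifo_size)
	{
		unsigned int q_fifo_size = 1 << (fifo_size + 7);	/* bytes */
		unsigned int p_fifo = q_fifo_size / 256;	/* 256 B units */

		if (p_fifo)
			p_fifo--;
		return p_fifo;
	}

	/* fifo_encode(7) == 0x3f: case 7 was XGBE_FIFO_SIZE_KB(16), and the
	 * removed enum had XGMAC_MTL_FIFO_SIZE_16K = 0x3f, so the formula
	 * reproduces the table entry.
	 */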
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index aae9d5ecd182..53ce1222b11d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -360,6 +360,9 @@ static irqreturn_t xgbe_isr(int irq, void *data)
}
}
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
+ pdata->ext_stats.rx_buffer_unavailable++;
+
/* Restart the device on a Fatal Bus Error */
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
schedule_work(&pdata->restart_work);
@@ -384,7 +387,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
hw_if->get_tx_tstamp(pdata);
- schedule_work(&pdata->tx_tstamp_work);
+ queue_work(pdata->dev_workqueue,
+ &pdata->tx_tstamp_work);
}
}
}
@@ -450,7 +454,7 @@ static void xgbe_service_timer(unsigned long data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- schedule_work(&pdata->service_work);
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
mod_timer(&pdata->service_timer, jiffies + HZ);
}
@@ -891,7 +895,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
netif_tx_start_all_queues(netdev);
xgbe_start_timers(pdata);
- schedule_work(&pdata->service_work);
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
DBGPR("<--xgbe_start\n");
@@ -1807,6 +1811,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
struct netdev_queue *txq;
int processed = 0;
unsigned int tx_packets = 0, tx_bytes = 0;
+ unsigned int cur;
DBGPR("-->xgbe_tx_poll\n");
@@ -1814,10 +1819,15 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
if (!ring)
return 0;
+ cur = ring->cur;
+
+ /* Be sure we get ring->cur before accessing descriptor data */
+ smp_rmb();
+
txq = netdev_get_tx_queue(netdev, channel->queue_index);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
- (ring->dirty != ring->cur)) {
+ (ring->dirty != cur)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
rdesc = rdata->rdesc;
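The smp_wmb() added to xgbe_dev_xmit() and the smp_rmb() added to xgbe_tx_poll() are a matched producer/consumer pair: the producer publishes its descriptor writes before advancing ring->cur, and the consumer snapshots ring->cur before reading descriptor data. Reduced to a skeleton (a sketch with hypothetical foo_* types and helpers, not the driver's exact code):

	struct foo_ring {
		struct foo_desc *desc;
		unsigned int cur, dirty;
	};

	static void foo_publish(struct foo_ring *ring, unsigned int i)
	{
		foo_fill_desc(&ring->desc[i]);	/* write the descriptor ... */
		smp_wmb();			/* ... before the index update */
		ring->cur = i + 1;
	}

	static void foo_clean(struct foo_ring *ring)
	{
		unsigned int cur = ring->cur;	/* snapshot producer index */

		smp_rmb();			/* index read before data reads */
		while (ring->dirty != cur)
			foo_clean_desc(&ring->desc[ring->dirty++]);
	}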
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 59e090e95c0e..6040293db9c1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -179,6 +179,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
+ XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
};
#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
@@ -187,8 +188,6 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
int i;
- DBGPR("-->%s\n", __func__);
-
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < XGBE_STATS_COUNT; i++) {
@@ -198,8 +197,6 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
break;
}
-
- DBGPR("<--%s\n", __func__);
}
static void xgbe_get_ethtool_stats(struct net_device *netdev,
@@ -209,23 +206,17 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
u8 *stat;
int i;
- DBGPR("-->%s\n", __func__);
-
pdata->hw_if.read_mmc_stats(pdata);
for (i = 0; i < XGBE_STATS_COUNT; i++) {
stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
*data++ = *(u64 *)stat;
}
-
- DBGPR("<--%s\n", __func__);
}
static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
{
int ret;
- DBGPR("-->%s\n", __func__);
-
switch (stringset) {
case ETH_SS_STATS:
ret = XGBE_STATS_COUNT;
@@ -235,8 +226,6 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
ret = -EOPNOTSUPP;
}
- DBGPR("<--%s\n", __func__);
-
return ret;
}
@@ -245,13 +234,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR("-->xgbe_get_pauseparam\n");
-
pause->autoneg = pdata->phy.pause_autoneg;
pause->tx_pause = pdata->phy.tx_pause;
pause->rx_pause = pdata->phy.rx_pause;
-
- DBGPR("<--xgbe_get_pauseparam\n");
}
static int xgbe_set_pauseparam(struct net_device *netdev,
@@ -260,13 +245,11 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
int ret = 0;
- DBGPR("-->xgbe_set_pauseparam\n");
-
- DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
- pause->autoneg, pause->tx_pause, pause->rx_pause);
-
- if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+ netdev_err(netdev,
+ "autoneg disabled, pause autoneg not available\n");
return -EINVAL;
+ }
pdata->phy.pause_autoneg = pause->autoneg;
pdata->phy.tx_pause = pause->tx_pause;
@@ -286,8 +269,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
if (netif_running(netdev))
ret = pdata->phy_if.phy_config_aneg(pdata);
- DBGPR("<--xgbe_set_pauseparam\n");
-
return ret;
}
@@ -296,8 +277,6 @@ static int xgbe_get_settings(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR("-->xgbe_get_settings\n");
-
cmd->phy_address = pdata->phy.address;
cmd->supported = pdata->phy.supported;
@@ -311,8 +290,6 @@ static int xgbe_get_settings(struct net_device *netdev,
cmd->port = PORT_NONE;
cmd->transceiver = XCVR_INTERNAL;
- DBGPR("<--xgbe_get_settings\n");
-
return 0;
}
@@ -323,16 +300,20 @@ static int xgbe_set_settings(struct net_device *netdev,
u32 speed;
int ret;
- DBGPR("-->xgbe_set_settings\n");
-
speed = ethtool_cmd_speed(cmd);
- if (cmd->phy_address != pdata->phy.address)
+ if (cmd->phy_address != pdata->phy.address) {
+ netdev_err(netdev, "invalid phy address %hhu\n",
+ cmd->phy_address);
return -EINVAL;
+ }
if ((cmd->autoneg != AUTONEG_ENABLE) &&
- (cmd->autoneg != AUTONEG_DISABLE))
+ (cmd->autoneg != AUTONEG_DISABLE)) {
+ netdev_err(netdev, "unsupported autoneg %hhu\n",
+ cmd->autoneg);
return -EINVAL;
+ }
if (cmd->autoneg == AUTONEG_DISABLE) {
switch (speed) {
@@ -341,16 +322,27 @@ static int xgbe_set_settings(struct net_device *netdev,
case SPEED_1000:
break;
default:
+ netdev_err(netdev, "unsupported speed %u\n", speed);
return -EINVAL;
}
- if (cmd->duplex != DUPLEX_FULL)
+ if (cmd->duplex != DUPLEX_FULL) {
+ netdev_err(netdev, "unsupported duplex %hhu\n",
+ cmd->duplex);
return -EINVAL;
+ }
}
+ netif_dbg(pdata, link, netdev,
+ "requested advertisement %#x, phy supported %#x\n",
+ cmd->advertising, pdata->phy.supported);
+
cmd->advertising &= pdata->phy.supported;
- if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+ if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
+ netdev_err(netdev,
+ "unsupported requested advertisement\n");
return -EINVAL;
+ }
ret = 0;
pdata->phy.autoneg = cmd->autoneg;
@@ -366,8 +358,6 @@ static int xgbe_set_settings(struct net_device *netdev,
if (netif_running(netdev))
ret = pdata->phy_if.phy_config_aneg(pdata);
- DBGPR("<--xgbe_set_settings\n");
-
return ret;
}
@@ -385,7 +375,20 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
- drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static u32 xgbe_get_msglevel(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return pdata->msg_enable;
+}
+
+static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ pdata->msg_enable = msglevel;
}
static int xgbe_get_coalesce(struct net_device *netdev,
@@ -393,8 +396,6 @@ static int xgbe_get_coalesce(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR("-->xgbe_get_coalesce\n");
-
memset(ec, 0, sizeof(struct ethtool_coalesce));
ec->rx_coalesce_usecs = pdata->rx_usecs;
@@ -402,8 +403,6 @@ static int xgbe_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames = pdata->tx_frames;
- DBGPR("<--xgbe_get_coalesce\n");
-
return 0;
}
@@ -415,8 +414,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
unsigned int rx_frames, rx_riwt, rx_usecs;
unsigned int tx_frames;
- DBGPR("-->xgbe_set_coalesce\n");
-
/* Check for not supported parameters */
if ((ec->rx_coalesce_usecs_irq) ||
(ec->rx_max_coalesced_frames_irq) ||
@@ -436,8 +433,10 @@ static int xgbe_set_coalesce(struct net_device *netdev,
(ec->rx_max_coalesced_frames_high) ||
(ec->tx_coalesce_usecs_high) ||
(ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval))
+ (ec->rate_sample_interval)) {
+ netdev_err(netdev, "unsupported coalescing parameter\n");
return -EOPNOTSUPP;
+ }
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
@@ -449,13 +448,13 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Rx */
if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
- netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
- hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+ netdev_err(netdev, "rx-usec is limited to %d usecs\n",
+ hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
return -EINVAL;
}
if (rx_frames > pdata->rx_desc_count) {
- netdev_alert(netdev, "rx-frames is limited to %d frames\n",
- pdata->rx_desc_count);
+ netdev_err(netdev, "rx-frames is limited to %d frames\n",
+ pdata->rx_desc_count);
return -EINVAL;
}
@@ -463,8 +462,8 @@ static int xgbe_set_coalesce(struct net_device *netdev,
/* Check the bounds of values for Tx */
if (tx_frames > pdata->tx_desc_count) {
- netdev_alert(netdev, "tx-frames is limited to %d frames\n",
- pdata->tx_desc_count);
+ netdev_err(netdev, "tx-frames is limited to %d frames\n",
+ pdata->tx_desc_count);
return -EINVAL;
}
@@ -476,8 +475,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
- DBGPR("<--xgbe_set_coalesce\n");
-
return 0;
}
@@ -539,8 +536,10 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int ret;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "unsupported hash function\n");
return -EOPNOTSUPP;
+ }
if (indir) {
ret = hw_if->set_rss_lookup_table(pdata, indir);
@@ -594,6 +593,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
.get_settings = xgbe_get_settings,
.set_settings = xgbe_set_settings,
.get_drvinfo = xgbe_get_drvinfo,
+ .get_msglevel = xgbe_get_msglevel,
+ .set_msglevel = xgbe_set_msglevel,
.get_link = ethtool_op_get_link,
.get_coalesce = xgbe_get_coalesce,
.set_coalesce = xgbe_set_coalesce,
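For context, the value stored through these new hooks is what a driver's own logging typically checks via the netif_msg_* helpers from netdevice.h; a minimal sketch (the helper below is illustrative, not part of this patch):

	/* Illustrative only: gate a debug print on the user-set message
	 * level. netif_msg_link() simply tests
	 * pdata->msg_enable & NETIF_MSG_LINK.
	 */
	static void xgbe_dbg_link_change(struct xgbe_prv_data *pdata, int up)
	{
		if (netif_msg_link(pdata))
			netdev_dbg(pdata->netdev, "link is %s\n",
				   up ? "up" : "down");
	}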
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index e83bd76abce6..7dd893331785 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -371,7 +371,7 @@ static int xgbe_probe(struct platform_device *pdev)
set_bit(XGBE_DOWN, &pdata->dev_state);
/* Check if we should use ACPI or DT */
- pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+ pdata->use_acpi = dev->of_node ? 0 : 1;
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 9088c3a35a20..446058081866 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1115,8 +1115,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
unsigned int reg, link_aneg;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
- if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
- netif_carrier_off(pdata->netdev);
+ netif_carrier_off(pdata->netdev);
pdata->phy.link = 0;
goto adjust_link;
@@ -1142,10 +1141,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
- if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
- set_bit(XGBE_LINK, &pdata->dev_state);
- netif_carrier_on(pdata->netdev);
- }
+ netif_carrier_on(pdata->netdev);
} else {
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
xgbe_check_link_timeout(pdata);
@@ -1156,10 +1152,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
xgbe_phy_status_aneg(pdata);
- if (test_bit(XGBE_LINK, &pdata->dev_state)) {
- clear_bit(XGBE_LINK, &pdata->dev_state);
- netif_carrier_off(pdata->netdev);
- }
+ netif_carrier_off(pdata->netdev);
}
adjust_link:
@@ -1179,8 +1172,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
pdata->phy.link = 0;
- if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
- netif_carrier_off(pdata->netdev);
+ netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata);
}
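Dropping the driver-private XGBE_LINK bit is safe because the core carrier helpers are already idempotent; a sketch of the simplified pattern (the function name is illustrative):

	/* netif_carrier_on()/netif_carrier_off() test-and-set the core's
	 * __LINK_STATE_NOCARRIER flag themselves, so calling them when the
	 * state is unchanged is a harmless no-op -- no private bit needed.
	 */
	static void example_update_carrier(struct net_device *ndev, bool link_up)
	{
		if (link_up)
			netif_carrier_on(ndev);
		else
			netif_carrier_off(ndev);
	}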
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 8c9d01ef730d..e234b9970318 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -209,8 +209,6 @@
#define XGMAC_IOCTL_CONTEXT 2
#define XGBE_FIFO_MAX 81920
-#define XGBE_FIFO_SIZE_B(x) (x)
-#define XGBE_FIFO_SIZE_KB(x) (x * 1024)
#define XGBE_TC_MIN_QUANTUM 10
@@ -461,7 +459,6 @@ struct xgbe_channel {
enum xgbe_state {
XGBE_DOWN,
- XGBE_LINK,
XGBE_LINK_INIT,
XGBE_LINK_ERR,
};
@@ -483,20 +480,6 @@ enum xgbe_int_state {
XGMAC_INT_STATE_RESTORE,
};
-enum xgbe_mtl_fifo_size {
- XGMAC_MTL_FIFO_SIZE_256 = 0x00,
- XGMAC_MTL_FIFO_SIZE_512 = 0x01,
- XGMAC_MTL_FIFO_SIZE_1K = 0x03,
- XGMAC_MTL_FIFO_SIZE_2K = 0x07,
- XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
- XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
- XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
- XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
- XGMAC_MTL_FIFO_SIZE_64K = 0xff,
- XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
- XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
-};
-
enum xgbe_speed {
XGBE_SPEED_1000 = 0,
XGBE_SPEED_2500,
@@ -598,6 +581,7 @@ struct xgbe_mmc_stats {
struct xgbe_ext_stats {
u64 tx_tso_packets;
u64 rx_split_header_packets;
+ u64 rx_buffer_unavailable;
};
struct xgbe_hw_if {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index a626c4315a89..33850a0f7e82 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -107,7 +107,8 @@ static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
xgene_enet_ring_set_type(ring);
- if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
+ xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
xgene_enet_ring_set_recombbuf(ring);
xgene_enet_ring_init(ring);
@@ -460,6 +461,7 @@ static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
+ struct device *dev = &pdata->pdev->dev;
u32 value, mc2;
u32 intf_ctl, rgmii;
u32 icm0, icm2;
@@ -489,7 +491,12 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
default:
ENET_INTERFACE_MODE2_SET(&mc2, 2);
intf_ctl |= ENET_GHD_MODE;
- CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
+
+ if (dev->of_node) {
+ CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
+ CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
+ }
+
xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
@@ -689,16 +696,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
netdev_dbg(ndev, "No phy-handle found in DT\n");
return -ENODEV;
}
- pdata->phy_dev = of_phy_find_device(phy_np);
- }
- phy_dev = pdata->phy_dev;
+ phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+ 0, pdata->phy_mode);
+ if (!phy_dev) {
+ netdev_err(ndev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
+
+ pdata->phy_dev = phy_dev;
+ } else {
+ phy_dev = pdata->phy_dev;
- if (!phy_dev ||
- phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
- pdata->phy_mode)) {
- netdev_err(ndev, "Could not connect to PHY\n");
- return -ENODEV;
+ if (!phy_dev ||
+ phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+ pdata->phy_mode)) {
+ netdev_err(ndev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
}
pdata->phy_speed = SPEED_UNKNOWN;
@@ -801,6 +816,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
+ if (pdata->phy_dev)
+ phy_disconnect(pdata->phy_dev);
+
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
pdata->mdio_bus = NULL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 541bed056012..6dee73c3c1e1 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -144,6 +144,7 @@ enum xgene_enet_rm {
#define CFG_BYPASS_UNISEC_RX BIT(1)
#define CFG_CLE_BYPASS_EN0 BIT(31)
#define CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3)
+#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
@@ -193,12 +194,16 @@ enum xgene_enet_rm {
#define USERINFO_LEN 32
#define FPQNUM_POS 32
#define FPQNUM_LEN 12
+#define NV_POS 50
+#define NV_LEN 1
+#define LL_POS 51
+#define LL_LEN 1
#define LERR_POS 60
#define LERR_LEN 3
#define STASH_POS 52
#define STASH_LEN 2
#define BUFDATALEN_POS 48
-#define BUFDATALEN_LEN 12
+#define BUFDATALEN_LEN 15
#define DATAADDR_POS 0
#define DATAADDR_LEN 42
#define COHERENT_POS 63
@@ -215,9 +220,19 @@ enum xgene_enet_rm {
#define IPHDR_LEN 6
#define EC_POS 22 /* Enable checksum */
#define EC_LEN 1
+#define ET_POS 23 /* Enable TSO */
#define IS_POS 24 /* IP protocol select */
#define IS_LEN 1
#define TYPE_ETH_WORK_MESSAGE_POS 44
+#define LL_BYTES_MSB_POS 56
+#define LL_BYTES_MSB_LEN 8
+#define LL_BYTES_LSB_POS 48
+#define LL_BYTES_LSB_LEN 12
+#define LL_LEN_POS 48
+#define LL_LEN_LEN 8
+#define DATALEN_MASK GENMASK(11, 0)
+
+#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS)
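A short worked note on the widened field (the flag interpretation is an inference from the masking, not stated in the patch): BUFDATALEN now spans bits 62:48 of m1, the low 12 bits (DATALEN_MASK) carry the byte count, and the bits above them appear to be control space -- hence the Rx path masks the extracted value and unused Tx slots are stamped with LAST_BUFFER:

	/* Sketch of the Rx-side extraction with the new mask: */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)) & DATALEN_MASK;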
struct xgene_enet_raw_desc {
__le64 m0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 299eb4315fe6..ce1068771b32 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -147,18 +147,27 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
{
struct sk_buff *skb;
struct device *dev;
+ skb_frag_t *frag;
+ dma_addr_t *frag_dma_addr;
u16 skb_index;
u8 status;
- int ret = 0;
+ int i, ret = 0;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
+ frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
dev = ndev_to_dev(cp_ring->ndev);
dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
- GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
+ skb_headlen(skb),
DMA_TO_DEVICE);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
+
/* Check for errors */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
@@ -179,12 +188,16 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
+ struct net_device *ndev = skb->dev;
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct iphdr *iph;
- u8 l3hlen, l4hlen = 0;
- u8 csum_enable = 0;
- u8 proto = 0;
- u8 ethhdr;
- u64 hopinfo;
+ u8 l3hlen = 0, l4hlen = 0;
+ u8 ethhdr, proto = 0, csum_enable = 0;
+ u64 hopinfo = 0;
+ u32 hdr_len, mss = 0;
+ u32 i, len, nr_frags;
+
+ ethhdr = xgene_enet_hdr_len(skb->data);
if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
unlikely(skb->protocol != htons(ETH_P_8021Q)))
@@ -201,14 +214,40 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
l4hlen = tcp_hdrlen(skb) >> 2;
csum_enable = 1;
proto = TSO_IPPROTO_TCP;
+ if (ndev->features & NETIF_F_TSO) {
+ hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+
+ if (skb_is_nonlinear(skb)) {
+ len = skb_headlen(skb);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < 2 && i < nr_frags; i++)
+ len += skb_shinfo(skb)->frags[i].size;
+
+ /* HW requires the headers to reside within the first 3 buffers */
+ if (unlikely(hdr_len > len)) {
+ if (skb_linearize(skb))
+ return 0;
+ }
+ }
+
+ if (!mss || ((skb->len - hdr_len) <= mss))
+ goto out;
+
+ if (mss != pdata->mss) {
+ pdata->mss = mss;
+ pdata->mac_ops->set_mss(pdata);
+ }
+ hopinfo |= SET_BIT(ET);
+ }
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
csum_enable = 1;
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
- ethhdr = xgene_enet_hdr_len(skb->data);
- hopinfo = SET_VAL(TCPHDR, l4hlen) |
+ hopinfo |= SET_VAL(TCPHDR, l4hlen) |
SET_VAL(IPHDR, l3hlen) |
SET_VAL(ETHHDR, ethhdr) |
SET_VAL(EC, csum_enable) |
@@ -219,35 +258,170 @@ out:
return hopinfo;
}
+static u16 xgene_enet_encode_len(u16 len)
+{
+ return (len == BUFLEN_16K) ? 0 : len;
+}
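In other words, a zero length appears to denote a full 16 KB buffer to the hardware:

	/* xgene_enet_encode_len(16384) == 0      (full 16 KB buffer)
	 * xgene_enet_encode_len(1500)  == 1500   (smaller lengths pass through)
	 */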
+
+static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
+{
+ desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
+ SET_VAL(BUFDATALEN, len));
+}
+
+static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
+{
+ __le64 *exp_bufs;
+
+ exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
+ memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
+ ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
+
+ return exp_bufs;
+}
+
+static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
+{
+ return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
+}
+
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
struct sk_buff *skb)
{
struct device *dev = ndev_to_dev(tx_ring->ndev);
struct xgene_enet_raw_desc *raw_desc;
- dma_addr_t dma_addr;
+ __le64 *exp_desc = NULL, *exp_bufs = NULL;
+ dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
+ skb_frag_t *frag;
u16 tail = tx_ring->tail;
u64 hopinfo;
+ u32 len, hw_len;
+ u8 ll = 0, nv = 0, idx = 0;
+ bool split = false;
+ u32 size, offset, ell_bytes = 0;
+ u32 i, fidx, nr_frags, count = 1;
raw_desc = &tx_ring->raw_desc[tail];
+ tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
- dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ hopinfo = xgene_enet_work_msg(skb);
+ if (!hopinfo)
+ return -EINVAL;
+ raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
+ hopinfo);
+
+ len = skb_headlen(skb);
+ hw_len = xgene_enet_encode_len(len);
+
+ dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
netdev_err(tx_ring->ndev, "DMA mapping error\n");
return -EINVAL;
}
/* Hardware expects descriptors in little-endian format */
- raw_desc->m0 = cpu_to_le64(tail);
raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
- SET_VAL(BUFDATALEN, skb->len) |
+ SET_VAL(BUFDATALEN, hw_len) |
SET_BIT(COHERENT));
- hopinfo = xgene_enet_work_msg(skb);
- raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
- hopinfo);
- tx_ring->cp_ring->cp_skb[tail] = skb;
- return 0;
+ if (!skb_is_nonlinear(skb))
+ goto out;
+
+ /* scatter gather */
+ nv = 1;
+ exp_desc = (void *)&tx_ring->raw_desc[tail];
+ tail = (tail + 1) & (tx_ring->slots - 1);
+ memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (i = nr_frags; i < 4; i++)
+ exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
+
+ frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
+
+ for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
+ if (!split) {
+ frag = &skb_shinfo(skb)->frags[fidx];
+ size = skb_frag_size(frag);
+ offset = 0;
+
+ pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pbuf_addr))
+ return -EINVAL;
+
+ frag_dma_addr[fidx] = pbuf_addr;
+ fidx++;
+
+ if (size > BUFLEN_16K)
+ split = true;
+ }
+
+ if (size > BUFLEN_16K) {
+ len = BUFLEN_16K;
+ size -= BUFLEN_16K;
+ } else {
+ len = size;
+ split = false;
+ }
+
+ dma_addr = pbuf_addr + offset;
+ hw_len = xgene_enet_encode_len(len);
+
+ switch (i) {
+ case 0:
+ case 1:
+ case 2:
+ xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
+ break;
+ case 3:
+ if (split || (fidx != nr_frags)) {
+ exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
+ xgene_set_addr_len(exp_bufs, idx, dma_addr,
+ hw_len);
+ idx++;
+ ell_bytes += len;
+ } else {
+ xgene_set_addr_len(exp_desc, i, dma_addr,
+ hw_len);
+ }
+ break;
+ default:
+ xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
+ idx++;
+ ell_bytes += len;
+ break;
+ }
+
+ if (split)
+ offset += BUFLEN_16K;
+ }
+ count++;
+
+ if (idx) {
+ ll = 1;
+ dma_addr = dma_map_single(dev, exp_bufs,
+ sizeof(u64) * MAX_EXP_BUFFS,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+ i = ell_bytes >> LL_BYTES_LSB_LEN;
+ exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+ SET_VAL(LL_BYTES_MSB, i) |
+ SET_VAL(LL_LEN, idx));
+ raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
+ }
+
+out:
+ raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
+ SET_VAL(USERINFO, tx_ring->tail));
+ tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+ tx_ring->tail = tail;
+
+ return count;
}
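A worked example of the split path above: a fragment larger than 16 KB keeps a single DMA mapping but is posted as multiple chunks at increasing offsets (values illustrative):

	/* e.g. a 40960-byte fragment maps once and is emitted as:
	 *
	 *	chunk 0: offset 0,     len 16384 (encoded as 0)
	 *	chunk 1: offset 16384, len 16384 (encoded as 0)
	 *	chunk 2: offset 32768, len 8192
	 */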
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
@@ -257,6 +431,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
u32 tx_level, cq_level;
+ int count;
tx_level = pdata->ring_ops->len(tx_ring);
cq_level = pdata->ring_ops->len(cp_ring);
@@ -266,14 +441,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
+ if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
+ return NETDEV_TX_OK;
+
+ count = xgene_enet_setup_tx_desc(tx_ring, skb);
+ if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- pdata->ring_ops->wr_cmd(tx_ring, 1);
+ pdata->ring_ops->wr_cmd(tx_ring, count);
skb_tx_timestamp(skb);
- tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
pdata->stats.tx_packets++;
pdata->stats.tx_bytes += skb->len;
@@ -326,7 +504,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
/* strip off CRC as HW isn't doing this */
datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
- datalen -= 4;
+ datalen = (datalen & DATALEN_MASK) - 4;
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, datalen);
@@ -358,26 +536,41 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
int budget)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
- struct xgene_enet_raw_desc *raw_desc;
+ struct xgene_enet_raw_desc *raw_desc, *exp_desc;
u16 head = ring->head;
u16 slots = ring->slots - 1;
- int ret, count = 0;
+ int ret, count = 0, processed = 0;
do {
raw_desc = &ring->raw_desc[head];
+ exp_desc = NULL;
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
/* read fpqnum field after dataaddr field */
dma_rmb();
+ if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
+ head = (head + 1) & slots;
+ exp_desc = &ring->raw_desc[head];
+
+ if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
+ head = (head - 1) & slots;
+ break;
+ }
+ dma_rmb();
+ count++;
+ }
if (is_rx_desc(raw_desc))
ret = xgene_enet_rx_frame(ring, raw_desc);
else
ret = xgene_enet_tx_completion(ring, raw_desc);
xgene_enet_mark_desc_slot_empty(raw_desc);
+ if (exp_desc)
+ xgene_enet_mark_desc_slot_empty(exp_desc);
head = (head + 1) & slots;
count++;
+ processed++;
if (ret)
break;
@@ -393,7 +586,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
}
}
- return count;
+ return processed;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -738,12 +931,13 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
enum xgene_ring_owner owner;
+ dma_addr_t dma_exp_bufs;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
u16 ring_id;
- int ret;
+ int ret, size;
/* allocate rx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
@@ -794,6 +988,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
ret = -ENOMEM;
goto err;
}
+
+ size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+ tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
+ GFP_KERNEL);
+ if (!tx_ring->exp_bufs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
pdata->tx_ring = tx_ring;
if (!pdata->cq_cnt) {
@@ -818,6 +1021,16 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
ret = -ENOMEM;
goto err;
}
+
+ size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+ cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+ size, GFP_KERNEL);
+ if (!cp_ring->frag_dma_addr) {
+ devm_kfree(dev, cp_ring->cp_skb);
+ ret = -ENOMEM;
+ goto err;
+ }
+
pdata->tx_ring->cp_ring = cp_ring;
pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
@@ -905,39 +1118,46 @@ static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pda
return ret;
}
-static int xgene_get_mac_address(struct device *dev,
- unsigned char *addr)
+static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
- int ret;
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
- ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
- if (ret)
- ret = device_property_read_u8_array(dev, "mac-address",
- addr, 6);
- if (ret)
- return -ENODEV;
+ ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
+ if (ret) {
+ pdata->tx_delay = 4;
+ return 0;
+ }
+
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid tx-delay specified\n");
+ return -EINVAL;
+ }
- return ETH_ALEN;
+ pdata->tx_delay = delay;
+
+ return 0;
}
-static int xgene_get_phy_mode(struct device *dev)
+static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
- int i, ret;
- char *modestr;
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
- ret = device_property_read_string(dev, "phy-connection-type",
- (const char **)&modestr);
- if (ret)
- ret = device_property_read_string(dev, "phy-mode",
- (const char **)&modestr);
- if (ret)
- return -ENODEV;
+ ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
+ if (ret) {
+ pdata->rx_delay = 2;
+ return 0;
+ }
- for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
- if (!strcasecmp(modestr, phy_modes(i)))
- return i;
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid rx-delay specified\n");
+ return -EINVAL;
}
- return -ENODEV;
+
+ pdata->rx_delay = delay;
+
+ return 0;
}
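For reference, a hypothetical device-tree fragment consumed by the two helpers above (node name and unit address are made up; valid values are 0-7, with defaults of 4 and 2 when a property is absent):

	/* Hypothetical DT fragment:
	 *
	 *	ethernet@17020000 {
	 *		...
	 *		tx-delay = <4>;
	 *		rx-delay = <2>;
	 *	};
	 */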
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
@@ -998,12 +1218,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
- if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+ if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
- pdata->phy_mode = xgene_get_phy_mode(dev);
+ pdata->phy_mode = device_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
@@ -1015,6 +1235,14 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
return -ENODEV;
}
+ ret = xgene_get_tx_delay(pdata);
+ if (ret)
+ return ret;
+
+ ret = xgene_get_rx_delay(pdata);
+ if (ret)
+ return ret;
+
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(dev, "Unable to get ENET Rx IRQ\n");
@@ -1126,10 +1354,17 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
pdata->ring_num = START_RING_NUM_0;
break;
case 1:
- pdata->cpu_bufnum = START_CPU_BUFNUM_1;
- pdata->eth_bufnum = START_ETH_BUFNUM_1;
- pdata->bp_bufnum = START_BP_BUFNUM_1;
- pdata->ring_num = START_RING_NUM_1;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
+ pdata->ring_num = XG_START_RING_NUM_1;
+ } else {
+ pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = START_BP_BUFNUM_1;
+ pdata->ring_num = START_RING_NUM_1;
+ }
break;
default:
break;
@@ -1207,7 +1442,8 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_set_ethtool_ops(ndev);
ndev->features |= NETIF_F_IP_CSUM |
NETIF_F_GSO |
- NETIF_F_GRO;
+ NETIF_F_GRO |
+ NETIF_F_SG;
of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
if (of_id) {
@@ -1233,6 +1469,12 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_setup_ops(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ ndev->features |= NETIF_F_TSO;
+ pdata->mss = XGENE_ENET_MSS;
+ }
+ ndev->hw_features = ndev->features;
+
ret = register_netdev(ndev);
if (ret) {
netdev_err(ndev, "Failed to register netdev\n");
@@ -1277,9 +1519,10 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops->tx_disable(pdata);
xgene_enet_napi_del(pdata);
- xgene_enet_mdio_remove(pdata);
- xgene_enet_delete_desc_rings(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
+ xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
@@ -1291,6 +1534,7 @@ static const struct acpi_device_id xgene_enet_acpi_match[] = {
{ "APMC0D05", XGENE_ENET1},
{ "APMC0D30", XGENE_ENET1},
{ "APMC0D31", XGENE_ENET1},
+ { "APMC0D3F", XGENE_ENET1},
{ "APMC0D26", XGENE_ENET2},
{ "APMC0D25", XGENE_ENET2},
{ }
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 1c85fc87703a..a6e56b88c0a0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -40,8 +40,12 @@
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define BUFLEN_16K (16 * 1024)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
+#define MAX_EXP_BUFFS 256
+#define XGENE_ENET_MSS 1448
+#define XGENE_MIN_ENET_FRAME_SIZE 60
#define START_CPU_BUFNUM_0 0
#define START_ETH_BUFNUM_0 2
@@ -52,6 +56,11 @@
#define START_BP_BUFNUM_1 0x2A
#define START_RING_NUM_1 264
+#define XG_START_CPU_BUFNUM_1 12
+#define XG_START_ETH_BUFNUM_1 2
+#define XG_START_BP_BUFNUM_1 0x22
+#define XG_START_RING_NUM_1 264
+
#define X2_START_CPU_BUFNUM_0 0
#define X2_START_ETH_BUFNUM_0 0
#define X2_START_BP_BUFNUM_0 0x20
@@ -79,6 +88,7 @@ struct xgene_enet_desc_ring {
u16 num;
u16 head;
u16 tail;
+ u16 exp_buf_tail;
u16 slots;
u16 irq;
char irq_name[IRQ_ID_SIZE];
@@ -93,6 +103,7 @@ struct xgene_enet_desc_ring {
u8 nbufpool;
struct sk_buff *(*rx_skb);
struct sk_buff *(*cp_skb);
+ dma_addr_t *frag_dma_addr;
enum xgene_enet_ring_cfgsize cfgsize;
struct xgene_enet_desc_ring *cp_ring;
struct xgene_enet_desc_ring *buf_pool;
@@ -102,6 +113,7 @@ struct xgene_enet_desc_ring {
struct xgene_enet_raw_desc *raw_desc;
struct xgene_enet_raw_desc16 *raw_desc16;
};
+ __le64 *exp_bufs;
};
struct xgene_mac_ops {
@@ -112,6 +124,7 @@ struct xgene_mac_ops {
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+ void (*set_mss)(struct xgene_enet_pdata *pdata);
void (*link_state)(struct work_struct *work);
};
@@ -170,6 +183,9 @@ struct xgene_enet_pdata {
u8 eth_bufnum;
u8 bp_bufnum;
u16 ring_num;
+ u32 mss;
+ u8 tx_delay;
+ u8 rx_delay;
};
struct xgene_indirect_ctl {
@@ -204,6 +220,9 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
#define GET_VAL(field, src) \
xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+#define GET_BIT(field, src) \
+ xgene_enet_get_field_value(field ## _POS, 1, src)
+
static inline struct device *ndev_to_dev(struct net_device *ndev)
{
return ndev->dev.parent;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 05edb847cf26..7a28a48cb2c7 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -184,6 +184,11 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+{
+ xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+}
+
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -204,8 +209,8 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
data &= ~HSTLENCHK;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
- xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
xgene_xgmac_set_mac_addr(pdata);
+ xgene_xgmac_set_mss(pdata);
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
@@ -329,6 +334,7 @@ struct xgene_mac_ops xgene_xgmac_ops = {
.rx_disable = xgene_xgmac_rx_disable,
.tx_disable = xgene_xgmac_tx_disable,
.set_mac_addr = xgene_xgmac_set_mac_addr,
+ .set_mss = xgene_xgmac_set_mss,
.link_state = xgene_enet_link_state
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index bf0a99435737..f8f908dbf51c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -62,7 +62,9 @@
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
+#define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214
#define XG_LINK_STATUS_ADDR 0x0228
+#define XG_TSIF_MSS_REG0_ADDR 0x02a4
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index d19a41b0c6d2..31071297896c 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -51,7 +51,7 @@ config BMAC
will be called bmac.
config MACMACE
- bool "Macintosh (AV) onboard MACE ethernet"
+ tristate "Macintosh (AV) onboard MACE ethernet"
depends on MAC
select CRC32
---help---
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index f9cb99bfb511..ffd180570920 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = {
{ .compatible = "snps,arc-emac" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
static struct platform_driver emac_arc_driver = {
.probe = emac_arc_probe,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 48694c239d5c..872b7abb0196 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -233,10 +233,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = atl1c_get_regs_len(netdev);
- drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
}
static void atl1c_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 932bd1862f7a..2795d6db10e1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -874,6 +874,8 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
+
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
ring_count);
@@ -1551,6 +1553,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 hw_next_to_clean;
u16 reg;
+ unsigned int total_bytes = 0, total_packets = 0;
reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
@@ -1558,12 +1561,18 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
+ if (buffer_info->skb) {
+ total_bytes += buffer_info->skb->len;
+ total_packets++;
+ }
atl1c_clean_buffer(pdev, buffer_info);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
}
+ netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+
if (netif_queue_stopped(adapter->netdev) &&
netif_carrier_ok(adapter->netdev)) {
netif_wake_queue(adapter->netdev);
@@ -2256,6 +2265,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
} else {
+ netdev_sent_queue(adapter->netdev, skb->len);
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 1be072f4afc2..8e3dbd4d9f79 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -316,10 +316,6 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = atl1e_get_regs_len(netdev);
- drvinfo->eedump_len = atl1e_get_eeprom_len(netdev);
}
static void atl1e_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index eca1d113fee1..529bca718334 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3388,7 +3388,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}
static void atl1_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 46a535318c7a..8f76f4558a88 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2030,10 +2030,6 @@ static void atl2_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = atl2_get_regs_len(netdev);
- drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
}
static void atl2_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 8be9eab73320..67a7d520d9f5 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,6 +139,16 @@ config BNX2X_SRIOV
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
+config BNX2X_VXLAN
+ bool "Virtual eXtensible Local Area Network support"
+ default n
+ depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for the VXLAN protocol over
+ the NetXtremeII series adapters.
+ Say Y here if you want to enable hardware offload support for
+ Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
config BGMAC
tristate "BCMA bus GBit core support"
depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
@@ -160,4 +170,23 @@ config SYSTEMPORT
Broadcom BCM7xxx Set Top Box family chipset using an internal
Ethernet switch.
+config BNXT
+ tristate "Broadcom NetXtreme-C/E support"
+ depends on PCI
+ select FW_LOADER
+ select LIBCRC32C
+ ---help---
+ This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+ Ethernet cards. To compile this driver as a module, choose M here:
+ the module will be called bnxt_en. Compiling as a module is recommended.
+
+config BNXT_SRIOV
+ bool "Broadcom NetXtreme-C/E SR-IOV support"
+ depends on BNXT && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support in the NetXtreme-C/E products. This
+ allows for virtual function acceleration in virtual environments.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index e2a958a657e0..00584d78b3e0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
+obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7f2cc3e485e..8b1929e9f698 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1333,7 +1333,6 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
- drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
@@ -2049,7 +2048,7 @@ static void swphy_poll_timer(unsigned long data)
for (i = 0; i < priv->num_ports; i++) {
struct bcm63xx_enetsw_port *port;
- int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+ int val, j, up, advertise, lpa, speed, duplex, media;
int external_phy = bcm_enet_port_is_rgmii(i);
u8 override;
@@ -2092,22 +2091,27 @@ static void swphy_poll_timer(unsigned long data)
lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
MII_LPA);
- lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
- MII_STAT1000);
-
/* figure out media and duplex from advertise and LPA values */
media = mii_nway_result(lpa & advertise);
duplex = (media & ADVERTISE_FULL) ? 1 : 0;
- if (lpa2 & LPA_1000FULL)
- duplex = 1;
-
- if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
- speed = 1000;
- else {
- if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
- speed = 100;
- else
- speed = 10;
+
+ if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+ speed = 100;
+ else
+ speed = 10;
+
+ if (val & BMSR_ESTATEN) {
+ advertise = bcmenet_sw_mdio_read(priv, external_phy,
+ port->phy_id, MII_CTRL1000);
+
+ lpa = bcmenet_sw_mdio_read(priv, external_phy,
+ port->phy_id, MII_STAT1000);
+
+ if ((advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)) &&
+     (lpa & (LPA_1000FULL | LPA_1000HALF))) {
+ speed = 1000;
+ duplex = (lpa & LPA_1000FULL);
+ }
}
dev_info(&priv->pdev->dev,
@@ -2597,7 +2601,6 @@ static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
strncpy(drvinfo->version, bcm_enet_driver_version, 32);
strncpy(drvinfo->fw_version, "N/A", 32);
strncpy(drvinfo->bus_info, "bcm63xx", 32);
- drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4566cdf0bc39..858106352ce9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -287,7 +287,6 @@ static void bcm_sysport_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, "0.1", sizeof(info->version));
strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
- info->n_stats = BCM_SYSPORT_STATS_LEN;
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
@@ -933,6 +932,21 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ disable_irq(priv->irq0);
+ bcm_sysport_rx_isr(priv->irq0, priv);
+ enable_irq(priv->irq0);
+
+ disable_irq(priv->irq1);
+ bcm_sysport_tx_isr(priv->irq1, priv);
+ enable_irq(priv->irq1);
+}
+#endif
+
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1723,6 +1737,9 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_set_features = bcm_sysport_set_features,
.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
.ndo_set_mac_address = bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bcm_sysport_poll_controller,
+#endif
};
#define REV_FMT "v%2x.%02x"
@@ -2061,6 +2078,7 @@ static const struct of_device_id bcm_sysport_of_match[] = {
{ .compatible = "brcm,systemport" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 21e3c38c7c75..28f7610b03fe 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1447,7 +1447,7 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
bgmac_err(bgmac, "Failed to register fixed PHY device\n");
return -ENODEV;
@@ -1549,11 +1549,20 @@ static int bgmac_probe(struct bcma_device *core)
struct net_device *net_dev;
struct bgmac *bgmac;
struct ssb_sprom *sprom = &core->bus->sprom;
- u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
+ u8 *mac;
int err;
- /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
- if (core->core_unit > 1) {
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
pr_err("Unsupported core_unit %d\n", core->core_unit);
return -ENOTSUPP;
}
@@ -1588,8 +1597,17 @@ static int bgmac_probe(struct bcma_device *core)
}
bgmac->cmn = core->bus->drv_gmac_cmn.core;
- bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
- sprom->et0phyaddr;
+ switch (core->core_unit) {
+ case 0:
+ bgmac->phyaddr = sprom->et0phyaddr;
+ break;
+ case 1:
+ bgmac->phyaddr = sprom->et1phyaddr;
+ break;
+ case 2:
+ bgmac->phyaddr = sprom->et2phyaddr;
+ break;
+ }
bgmac->phyaddr &= BGMAC_PHY_MASK;
if (bgmac->phyaddr == BGMAC_PHY_MASK) {
bgmac_err(bgmac, "No PHY found\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2b66ef3d8217..8fc3f3c137f8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -813,6 +813,46 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
}
static void
+bnx2_free_stats_blk(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ if (bp->status_blk) {
+ dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+ bp->status_blk,
+ bp->status_blk_mapping);
+ bp->status_blk = NULL;
+ bp->stats_blk = NULL;
+ }
+}
+
+static int
+bnx2_alloc_stats_blk(struct net_device *dev)
+{
+ int status_blk_size;
+ void *status_blk;
+ struct bnx2 *bp = netdev_priv(dev);
+
+ /* Combine status and statistics blocks into one allocation. */
+ status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+ if (bp->flags & BNX2_FLAG_MSIX_CAP)
+ status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+ BNX2_SBLK_MSIX_ALIGN_SIZE);
+ bp->status_stats_size = status_blk_size +
+ sizeof(struct statistics_block);
+ status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
+ if (status_blk == NULL)
+ return -ENOMEM;
+
+ bp->status_blk = status_blk;
+ bp->stats_blk = status_blk + status_blk_size;
+ bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+
+ return 0;
+}
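The resulting layout of the single coherent allocation, sketched from the code above:

	/* status_blk / status_blk_mapping -> +---------------------------+
	 *                                    | status block(s), L1-cache |
	 *                                    | aligned; one slot per HW  |
	 *                                    | vector when MSI-X is used |
	 * stats_blk / stats_blk_mapping ---> +---------------------------+
	 *                                    | statistics block          |
	 *                                    +---------------------------+
	 */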
+
+static void
bnx2_free_mem(struct bnx2 *bp)
{
int i;
@@ -829,37 +869,19 @@ bnx2_free_mem(struct bnx2 *bp)
bp->ctx_blk[i] = NULL;
}
}
- if (bnapi->status_blk.msi) {
- dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
- bnapi->status_blk.msi,
- bp->status_blk_mapping);
+
+ if (bnapi->status_blk.msi)
bnapi->status_blk.msi = NULL;
- bp->stats_blk = NULL;
- }
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
- int i, status_blk_size, err;
+ int i, err;
struct bnx2_napi *bnapi;
- void *status_blk;
-
- /* Combine status and statistics blocks into one allocation. */
- status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
- if (bp->flags & BNX2_FLAG_MSIX_CAP)
- status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
- BNX2_SBLK_MSIX_ALIGN_SIZE);
- bp->status_stats_size = status_blk_size +
- sizeof(struct statistics_block);
-
- status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
- if (status_blk == NULL)
- goto alloc_mem_err;
bnapi = &bp->bnx2_napi[0];
- bnapi->status_blk.msi = status_blk;
+ bnapi->status_blk.msi = bp->status_blk;
bnapi->hw_tx_cons_ptr =
&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
bnapi->hw_rx_cons_ptr =
@@ -870,7 +892,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
bnapi = &bp->bnx2_napi[i];
- sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
@@ -880,10 +902,6 @@ bnx2_alloc_mem(struct bnx2 *bp)
}
}
- bp->stats_blk = status_blk + status_blk_size;
-
- bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
-
if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
if (bp->ctx_pages == 0)
@@ -8330,6 +8348,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->phy_addr = 1;
+ /* allocate stats_blk */
+ rc = bnx2_alloc_stats_blk(dev);
+ if (rc)
+ goto err_out_unmap;
+
/* Disable WOL support if we are running on a SERDES chip. */
if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
bnx2_get_5709_media(bp);
@@ -8453,6 +8476,8 @@ err_out_disable:
pci_disable_device(pdev);
err_out:
+ kfree(bp->temp_stats_blk);
+
return rc;
}
@@ -8586,6 +8611,7 @@ error:
pci_release_regions(pdev);
pci_disable_device(pdev);
err_free:
+ bnx2_free_stats_blk(dev);
free_netdev(dev);
return rc;
}
@@ -8603,6 +8629,7 @@ bnx2_remove_one(struct pci_dev *pdev)
pci_iounmap(bp->pdev, bp->regview);
+ bnx2_free_stats_blk(dev);
kfree(bp->temp_stats_blk);
if (bp->flags & BNX2_FLAG_AER_ENABLED) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f92f76c44756..380234d72b95 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6928,6 +6928,7 @@ struct bnx2 {
dma_addr_t status_blk_mapping;
+ void *status_blk;
struct statistics_block *stats_blk;
struct statistics_block *temp_stats_blk;
dma_addr_t stats_blk_mapping;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index cd4ae76bbff2..b5e64b02200c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,8 @@
-/* bnx2x.h: Broadcom Everest network driver.
+/* bnx2x.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,7 +32,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.710.51-0"
+#define DRV_MODULE_VERSION "1.712.30-0"
#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
@@ -1227,6 +1229,10 @@ struct bnx2x_slowpath {
} mac_rdata;
union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
struct tstorm_eth_mac_filter_config e1x;
struct eth_filter_rules_ramrod_data e2;
} rx_mode_rdata;
@@ -1386,6 +1392,8 @@ enum sp_rtnl_flag {
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_GET_DRV_VERSION,
+ BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+ BNX2X_SP_RTNL_DEL_VXLAN_PORT,
};
enum bnx2x_iov_flag {
@@ -1408,6 +1416,9 @@ struct bnx2x_sp_objs {
/* Queue State object */
struct bnx2x_queue_sp_obj q_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
};
struct bnx2x_fp_stats {
@@ -1422,6 +1433,13 @@ enum {
SUB_MF_MODE_UNKNOWN = 0,
SUB_MF_MODE_UFP,
SUB_MF_MODE_NPAR1_DOT_5,
+ SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+ struct list_head link;
+ u16 vid;
+ bool hw;
};
struct bnx2x {
@@ -1636,6 +1654,8 @@ struct bnx2x {
u8 mf_sub_mode;
#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \
bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp) (IS_MF_SD(bp) && \
+ bp->mf_sub_mode == SUB_MF_MODE_BD)
u8 wol;
@@ -1860,8 +1880,6 @@ struct bnx2x {
int dcb_version;
/* CAM credit pools */
-
- /* used only in sriov */
struct bnx2x_credit_pool_obj vlans_pool;
struct bnx2x_credit_pool_obj macs_pool;
@@ -1924,6 +1942,12 @@ struct bnx2x {
u16 rx_filter;
struct bnx2x_link_report_data vf_link_vars;
+ struct list_head vlan_reg;
+ u16 vlan_cnt;
+ u16 vlan_credit;
+ u16 vxlan_dst_port;
+ u8 vxlan_dst_port_count;
+ bool accept_any_vlan;
};
/* Tx queues may be less or equal to Rx queues */
@@ -1951,23 +1975,14 @@ extern int num_queues;
#define RSS_IPV6_TCP_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
-/* func init flags */
-#define FUNC_FLG_RSS 0x0001
-#define FUNC_FLG_STATS 0x0002
-/* removed FUNC_FLG_UNMATCHED 0x0004 */
-#define FUNC_FLG_TPA 0x0008
-#define FUNC_FLG_SPQ 0x0010
-#define FUNC_FLG_LEADING 0x0020 /* PF only */
-#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
- dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
- dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
+ bool spq_active;
+ dma_addr_t spq_map;
+ u16 spq_prod;
- u16 func_flgs;
u16 func_id; /* abs fid */
u16 pf_id;
- u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
#define for_each_cnic_queue(bp, var) \
@@ -2077,6 +2092,11 @@ struct bnx2x_func_init_params {
int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags);
+
/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
@@ -2481,6 +2501,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define VF_ACQUIRE_THRESH 3
#define VF_ACQUIRE_MAC_FILTERS 1
#define VF_ACQUIRE_MC_FILTERS 10
+#define VF_ACQUIRE_VLAN_FILTERS 2 /* VLAN0 + 'real' VLAN */
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
@@ -2553,6 +2574,10 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
(IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \
IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
#define SET_FLAG(value, mask, flag) \
do {\
@@ -2577,6 +2602,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
void bnx2x_update_mng_version(struct bnx2x *bp);
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
@@ -2589,4 +2616,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
#define BNX2X_MAX_PHC_DRIFT 31000000
#define BNX2X_PTP_TX_TIMEOUT
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..44173be5cbf0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -262,9 +264,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
+ dev_kfree_skb_any(skb);
}
- dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -1188,7 +1190,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
/* Calculate the current MAX line speed limit for the MF
* devices
*/
- if (IS_MF_SI(bp))
+ if (IS_MF_PERCENT_BW(bp))
line_speed = (line_speed * maxCfg) / 100;
else { /* SD mode */
u16 vn_max_rate = maxCfg * 100;
@@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (rss_obj->udp_rss_v6)
__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
+ /* valid only for TUNN_MODE_VXLAN tunnel mode */
+ __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
/* valid only for TUNN_MODE_GRE tunnel mode */
- __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+ __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+ }
} else {
__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
}
@@ -2510,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
fp->mode = TPA_MODE_DISABLED;
}
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+ u32 cur;
+
+ if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+ return;
+
+ cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+ DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+ cur, state);
+
+ SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
int bnx2x_load_cnic(struct bnx2x *bp)
{
int i, rc, port = BP_PORT(bp);
@@ -2827,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */
+ /* Re-configure vlan filters */
+ rc = bnx2x_vlan_reconfigure_vid(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+
/* Initialize Rx filter. */
bnx2x_set_rx_mode_inner(bp);
@@ -2873,6 +2899,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* mark driver is loaded in shmem2 */
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ val &= ~DRV_FLAGS_MTU_MASK;
+ val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
DRV_FLAGS_CAPABILITIES_LOADED_L2);
@@ -2885,10 +2913,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
+ /* Update driver data for On-Chip MFW dump. */
+ if (IS_PF(bp))
+ bnx2x_update_mfw_dump(bp);
+
/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
bnx2x_dcbx_init(bp, false);
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
return 0;
@@ -2956,6 +2991,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
/* mark driver is unloaded in shmem2 */
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
@@ -3677,7 +3715,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
pbd2->fw_ip_hdr_to_payload_w =
hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
pbd_e2->data.tunnel_data.flags |=
- ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+ ETH_TUNNEL_DATA_IPV6_OUTER;
}
pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4184,6 +4222,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+ int mfw_vn = BP_FW_MB_IDX(bp);
+ u32 tmp;
+
+ /* If the shmem shouldn't affect configuration, use an identity map */
+ if (!IS_MF_BD(bp)) {
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+ c2s_map[i] = i;
+ *c2s_default = 0;
+
+ return;
+ }
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[0] = tmp & 0xff;
+ c2s_map[1] = (tmp >> 8) & 0xff;
+ c2s_map[2] = (tmp >> 16) & 0xff;
+ c2s_map[3] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ c2s_map[4] = tmp & 0xff;
+ c2s_map[5] = (tmp >> 8) & 0xff;
+ c2s_map[6] = (tmp >> 16) & 0xff;
+ c2s_map[7] = (tmp >> 24) & 0xff;
+
+ tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+ tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+ *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
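A worked example of the byte unpacking above (register value illustrative):

	/* If c2s_pcp_map_lower reads 0x03020100 after the endian fixup,
	 * the bytes unpack low-to-high into the first four priorities:
	 *
	 *	c2s_map[0] = 0x00, c2s_map[1] = 0x01,
	 *	c2s_map[2] = 0x02, c2s_map[3] = 0x03
	 */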
+
/**
* bnx2x_setup_tc - routine to configure net_device for multi tc
*
@@ -4194,8 +4267,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
- int cos, prio, count, offset;
struct bnx2x *bp = netdev_priv(dev);
+ u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+ int cos, prio, count, offset;
/* setup tc must be called under rtnl lock */
ASSERT_RTNL();
@@ -4219,12 +4293,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return -EINVAL;
}
+ bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
/* configure priority to traffic class mapping */
for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
- netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+ int outer_prio = c2s_map[prio];
+
+ netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping priority %d to tc %d\n",
- prio, bp->prio_to_cos[prio]);
+ outer_prio, bp->prio_to_cos[outer_prio]);
}
/* Use this configuration to differentiate tc0 from other COSes
@@ -4278,6 +4356,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
rc = bnx2x_set_eth_mac(bp, true);
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return rc;
}
@@ -4831,6 +4912,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return bnx2x_reload_if_running(dev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 03b7404d5b9b..b7d32e8412f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.h: Broadcom Everest network driver.
+/* bnx2x_cmn.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
*/
void bnx2x_tx_timeout(struct net_device *dev);
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ *
+ * @bp:		driver handle
+ * @c2s_map:	should have BNX2X_MAX_PRIORITY entries for mapping
+ * @c2s_default: entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -931,14 +941,35 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
start_params->mf_mode = bp->mf_mode;
start_params->sd_vlan_tag = bp->mf_ov;
+ /* Configure Ethertype for BD mode */
+ if (IS_MF_BD(bp)) {
+ DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+ start_params->sd_vlan_eth_type = ETH_P_8021AD;
+ REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+ REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+ bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+ &start_params->c2s_pri_default);
+ start_params->c2s_pri_valid = 1;
+
+ DP(NETIF_MSG_IFUP,
+ "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+ start_params->c2s_pri[0], start_params->c2s_pri[1],
+ start_params->c2s_pri[2], start_params->c2s_pri[3],
+ start_params->c2s_pri[4], start_params->c2s_pri[5],
+ start_params->c2s_pri[6], start_params->c2s_pri[7],
+ start_params->c2s_pri_default);
+ }
+
if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
start_params->network_cos_mode = STATIC_COS;
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->tunnel_mode = TUNN_MODE_GRE;
- start_params->gre_tunnel_type = IPGRE_TUNNEL;
- start_params->inner_gre_rss_en = 1;
+ start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
+ start_params->inner_rss = 1;
if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
start_params->class_fail_ethtype = ETH_P_FIP;
@@ -1037,6 +1068,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
&bp->macs_pool);
+
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+ fp->cl_id, fp->cid, BP_FUNC(bp),
+ bnx2x_sp(bp, vlan_rdata),
+ bnx2x_sp_mapping(bp, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &bp->sp_state, obj_type,
+ &bp->vlans_pool);
}
/**
@@ -1096,7 +1136,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
- bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
/* RSS configuration object */
@@ -1106,6 +1146,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_sp_mapping(bp, rss_rdata),
BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
BNX2X_OBJ_TYPE_RX);
+
+ bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
}
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
@@ -1339,4 +1381,23 @@ void bnx2x_squeeze_objects(struct bnx2x *bp);
void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
u32 verbose);
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp: driver handle
+ * @state: OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp: driver handle
+ * @offset: byte offset in nvram
+ * @ret_buf: pointer to buffer where data is to be stored
+ * @buf_size: Length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 6e4294ed1fc9..7ccf6684e0a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.c: Broadcom Everest network driver.
+/* bnx2x_dcb.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
if (bp->dcbx_port_params.ets.cos_params[cos].
pri_bitmask & pri_bit)
tt2cos[pri].cos = cos;
+
+ pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri];
}
/* we never want the FW to add a 0 vlan tag */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index c6939ecb02c5..9a9517c0f703 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.h: Broadcom Everest network driver.
+/* bnx2x_dcb.h: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index 741aa130c19f..eccfa13b0f2d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,15 +1,17 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
+/* bnx2x_dump.h: QLogic Everest network driver.
*
* Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..d84efcd34fac 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,8 @@
-/* bnx2x_ethtool.c: Broadcom Everest network driver.
+/* bnx2x_ethtool.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1088,10 +1090,6 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- info->n_stats = BNX2X_NUM_STATS;
- info->testinfo_len = BNX2X_NUM_TESTS(bp);
- info->eedump_len = bp->common.flash_size;
- info->regdump_len = bnx2x_get_regs_len(dev);
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1129,6 +1127,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
} else
bp->wol = 0;
+ if (SHMEM2_HAS(bp, curr_cfg))
+ SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
return 0;
}
@@ -1343,8 +1344,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
return rc;
}
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
- int buf_size)
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+ int buf_size)
{
int rc;
u32 cmd_flags;
@@ -1718,6 +1719,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
offset += sizeof(u32);
data_buf += sizeof(u32);
written_so_far += sizeof(u32);
+
+		/* At the end of each 4Kb page, release the NVRAM lock to
+		 * give the MFW a chance to take it for its own use.
+		 */
+ if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+ (written_so_far < buf_size)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Releasing NVM lock after offset 0x%x\n",
+ (u32)(offset - sizeof(u32)));
+ bnx2x_release_nvram_lock(bp);
+ usleep_range(1000, 2000);
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+ }
+
cmd_flags = 0;
}
@@ -3330,6 +3347,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
udp_rss_requested = 0;
else
return -EINVAL;
+
+ if (CHIP_IS_E1x(bp) && udp_rss_requested) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
+ return -EINVAL;
+ }
+
if ((info->flow_type == UDP_V4_FLOW) &&
(bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
@@ -3562,17 +3586,8 @@ static int bnx2x_get_ts_info(struct net_device *dev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 7636e3c18771..226ab29f4cb6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,8 @@
-/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -372,7 +374,7 @@
#define MAX_COS_NUMBER 4
#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
/* used by array traffic_type_to_priority[] to mark traffic type \
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 8aafd9b5d6a2..9e3b5a1e9f4f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,8 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 058bc7328220..cafd5de675cf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,8 @@
-/* bnx2x_hsi.h: Broadcom Everest network driver.
+/* bnx2x_hsi.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -729,6 +731,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858 0x00001200
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
@@ -786,6 +789,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 0x00001200
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
@@ -864,6 +868,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500
#define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600
#define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700
@@ -2064,6 +2069,45 @@ struct ncsi_oem_fcoe_features {
#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
};
+enum curr_cfg_method_e {
+ CURR_CFG_MET_NONE = 0, /* default config */
+ CURR_CFG_MET_OS = 1,
+	CURR_CFG_MET_VENDOR_SPEC = 2, /* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+ struct bdn_fc_npiv_cfg fc_npiv_cfg;
+ struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct mdump_driver_info {
+ u32 epoc;
+ u32 drv_ver;
+ u32 fw_ver;
+
+ u32 valid_dump;
+ #define FIRST_DUMP_VALID (1 << 0)
+ #define SECOND_DUMP_VALID (1 << 1)
+
+ u32 flags;
+ #define ENABLE_ALL_TRIGGERS (0x7fffffff)
+ #define TRIGGER_MDUMP_ONCE (1 << 31)
+};
+
struct ncsi_oem_data {
u32 driver_version[4];
struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
@@ -2187,6 +2231,8 @@ struct shmem2_region {
#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+#define DRV_FLAGS_MTU_MASK 0xffff0000
+#define DRV_FLAGS_MTU_SHIFT 16
u32 extended_dev_info_shared_cfg_size;
@@ -2251,6 +2297,7 @@ struct shmem2_region {
u32 reserved4; /* Offset 0x150 */
u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
#define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
+ #define LINK_ATTR_84858 0x00000002
#define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
#define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
#define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
@@ -2268,6 +2315,74 @@ struct shmem2_region {
/* We use indication for each PF (0..3) */
#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+ union { /* For various OEMs */ /* Offset 0x1a0 */
+ u8 storage_boot_prog[E2_FUNC_MAX];
+ #define STORAGE_BOOT_PROG_MASK 0x000000FF
+ #define STORAGE_BOOT_PROG_NONE 0x00000000
+ #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002
+ #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002
+ #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004
+ #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008
+ #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008
+ #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010
+ #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020
+ #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040
+ #define STORAGE_BOOT_PROG_COMPLETED 0x00000080
+
+ u32 oem_i2c_data_addr;
+ };
+
+	/* 9 entries for the C2S PCP map: one per inner VLAN PCP + 1 default */
+	/* For PCP values 0-3 use the map lower */
+	/* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+	 * 0x0000FF00 - PCP 2, 0x000000FF - PCP 3
+	 */
+ u32 c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */
+
+ /* For PCP values 4-7 use the map upper */
+ /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+	 * 0x0000FF00 - PCP 6, 0x000000FF - PCP 7
+ */
+ u32 c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */
+
+ /* For PCP default value get the MSB byte of the map default */
+ u32 c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */
+
+ /* FC_NPIV table offset in NVRAM */
+ u32 fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */
+
+ /* Shows last method that changed configuration of this device */
+ enum curr_cfg_method_e curr_cfg; /* 0x1dc */
+
+	/* Storm FW version, should be kept in the format 0xMMmmbbdd:
+	 * MM - Major, mm - Minor, bb - Build, dd - Drop
+	 * (e.g. FW 7.12.30.0 is encoded as 0x070c1e00)
+	 */
+ u32 netproc_fw_ver; /* 0x1e0 */
+
+ /* Option ROM SMASH CLP version */
+ u32 clp_ver; /* 0x1e4 */
+
+ u32 pcie_bus_num; /* 0x1e8 */
+
+ u32 sriov_switch_mode; /* 0x1ec */
+ #define SRIOV_SWITCH_MODE_NONE 0x0
+ #define SRIOV_SWITCH_MODE_VEB 0x1
+ #define SRIOV_SWITCH_MODE_VEPA 0x2
+
+ u8 rsrv2[E2_FUNC_MAX]; /* 0x1f0 */
+
+ u32 img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */
+
+ u32 mtu_size[E2_FUNC_MAX]; /* 0x1f8 */
+
+ u32 os_driver_state[E2_FUNC_MAX]; /* 0x208 */
+ #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */
+ #define OS_DRIVER_STATE_LOADING 1 /* transition state */
+ #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */
+ #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */
+
+ /* mini dump driver info */
+ struct mdump_driver_info drv_info; /* 0x218 */
};
@@ -2898,8 +3013,8 @@ struct afex_stats {
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 10
-#define BCM_5710_FW_REVISION_VERSION 51
+#define BCM_5710_FW_MINOR_VERSION 12
+#define BCM_5710_FW_REVISION_VERSION 30
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3901,7 +4016,11 @@ struct eth_fast_path_rx_cqe {
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
- __le32 reserved1[7];
+ u8 tunn_type;
+ u8 tunn_inner_hdrs_offset;
+ __le16 reserved1;
+ __le32 tunn_tenant_id;
+ __le32 padding[5];
u32 marker;
};
@@ -4012,8 +4131,8 @@ struct eth_tunnel_data {
__le16 pseudo_csum;
u8 ip_hdr_start_inner_w;
u8 flags;
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
};
@@ -4120,16 +4239,12 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
u8 rss_result_mask;
u8 reserved3;
__le16 reserved4;
@@ -4314,6 +4429,18 @@ enum eth_tunnel_non_lso_csum_location {
MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
};
+enum eth_tunn_type {
+ TUNN_TYPE_NONE,
+ TUNN_TYPE_VXLAN,
+ TUNN_TYPE_L2_GRE,
+ TUNN_TYPE_IPV4_GRE,
+ TUNN_TYPE_IPV6_GRE,
+ TUNN_TYPE_L2_GENEVE,
+ TUNN_TYPE_IPV4_GENEVE,
+ TUNN_TYPE_IPV6_GENEVE,
+ MAX_ETH_TUNN_TYPE
+};
+
/*
* Tx regular BD structure
*/
@@ -4758,6 +4885,9 @@ struct afex_vif_list_ramrod_data {
__le16 reserved1;
};
+struct c2s_pri_trans_table_entry {
+ u8 val[MAX_VLAN_PRIORITIES];
+};
/*
* cfc delete event data
@@ -5246,6 +5376,7 @@ struct flow_control_configuration {
u8 dont_add_pri_0_en;
u8 reserved1;
__le32 reserved2;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
};
@@ -5260,18 +5391,25 @@ struct function_start_data {
u8 path_id;
u8 network_cos_mode;
u8 dmae_cmd_id;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
- u8 tunn_clss_en;
- u8 inner_gre_rss_en;
- u8 sd_accept_mf_clss_fail;
+ u8 no_added_tags;
+ __le16 reserved0;
+ __le32 reserved1;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
__le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
+ u8 sd_accept_mf_clss_fail;
+ u8 sd_accept_mf_clss_fail_match_ethtype;
__le16 sd_accept_mf_clss_fail_ethtype;
__le16 sd_vlan_eth_type;
u8 sd_vlan_force_pri_flg;
u8 sd_vlan_force_pri_val;
- u8 sd_accept_mf_clss_fail_match_ethtype;
- u8 no_added_tags;
+ u8 c2s_pri_tt_valid;
+ u8 c2s_pri_default;
+ u8 reserved2[6];
+ struct c2s_pri_trans_table_entry c2s_pri_trans_table;
};
struct function_update_data {
@@ -5289,11 +5427,12 @@ struct function_update_data {
u8 tx_switch_suspend;
u8 echo;
u8 update_tunn_cfg_flg;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
- u8 tunn_clss_en;
- u8 inner_gre_rss_en;
+ u8 inner_clss_vxlan;
+ u8 inner_clss_l2gre;
+ u8 inner_clss_l2geneve;
+ u8 inner_rss;
__le16 vxlan_dst_port;
+ __le16 geneve_dst_port;
u8 sd_vlan_force_pri_change_flg;
u8 sd_vlan_force_pri_flg;
u8 sd_vlan_force_pri_val;
@@ -5302,6 +5441,8 @@ struct function_update_data {
u8 reserved1;
__le16 sd_vlan_tag;
__le16 sd_vlan_eth_type;
+ __le16 reserved0;
+ __le32 reserved2;
};
/*
@@ -5330,15 +5471,6 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
-
-/* GRE Tunnel Mode */
-enum gre_tunnel_type {
- NVGRE_TUNNEL,
- L2GRE_TUNNEL,
- IPGRE_TUNNEL,
- MAX_GRE_TUNNEL_TYPE
-};
-
/*
* Dynamic Host-Coalescing - Driver(host) counters
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index d6e1975b7b69..46ee2c01f4c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,9 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
+/* bnx2x_init.h: QLogic Everest network driver.
 * Structures and macros needed during the initialization.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 5669ed2e87d0..1835d2e451c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -1,8 +1,10 @@
-/* bnx2x_init_ops.h: Broadcom Everest network driver.
+/* bnx2x_init_ops.h: QLogic Everest network driver.
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a0b03c27e0a3..d946bba43726 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,13 +1,15 @@
/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Written by Yaniv Rosner
@@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8481/BCM84823/BCM84833 PHY SECTION */
/******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+ return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct bnx2x *bp,
u8 port)
@@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
};
u16 fw_ver1;
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (bnx2x_is_8483x_8485x(phy)) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
phy->ver_addr);
@@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+ if (bnx2x_is_8483x_8485x(phy))
offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
else
offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
@@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
switch (action) {
case PHY_INIT:
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (!bnx2x_is_8483x_8485x(phy)) {
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
}
@@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
/* Always write this if this is not 84833/4.
* For 84833/4, write it only when it's a forced speed.
*/
- if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+ if (!bnx2x_is_8483x_8485x(phy) ||
((autoneg_val & (1<<12)) == 0))
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
@@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc)
+{
+ int idx;
+ u16 val;
+ struct bnx2x *bp = params->bp;
+
+	/* Step 1: Poll the STATUS register to see whether the previous command
+	 * is in progress or the system is busy (CMD_IN_PROGRESS or
+	 * SYSTEM_BUSY). If so, keep polling until the previous command
+	 * finishes execution and the system is available to take a new
+	 * command.
+	 */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+ (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+ return -EINVAL;
+ }
+
+	/* Step 2: If any parameters are required for the function, write them
+	 * to the required DATA registers
+	 */
+
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
+
+	/* Step 3: When the firmware is ready for commands, write the 'Command
+	 * code' to the CMD register
+	 */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+	/* Step 4: Once the command has been written, poll the STATUS register
+	 * to check whether the command has completed (CMD_COMPLETE_PASS or
+	 * CMD_COMPLETE_ERROR).
+	 */
+
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+ return -EINVAL;
+ }
+	/* Step 5: Once the command has completed, read the specified DATA
+	 * registers for any saved results for the command, if applicable
+	 */
+
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
+
+ return 0;
+}
+
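The same five-step handshake drives every 84858 mailbox command. A usage
sketch (a fragment, not part of this patch; the PHY848xx_CMD_GET_EEE_MODE
opcode is assumed from the renamed bnx2x_reg.h constants):

	/* Sketch: issue a one-argument command and read back its result */
	u16 args[PHY848xx_CMDHDLR_MAX_ARGS] = {0};

	if (!bnx2x_84858_cmd_hdlr(phy, params, PHY848xx_CMD_GET_EEE_MODE,
				  args, 1))
		DP(NETIF_MSG_LINK, "EEE mode = 0x%x\n", args[0]);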
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params, u16 fw_cmd,
u16 cmd_args[], int argc)
@@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
+ MDIO_848xx_CMD_HDLR_STATUS,
PHY84833_STATUS_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
usleep_range(1000, 2000);
}
- if (idx >= PHY84833_CMDHDLR_WAIT) {
+ if (idx >= PHY848xx_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
return -EINVAL;
}
@@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
/* Prepare argument(s) and issue command */
for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
}
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
- for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+ MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS, &val);
+ MDIO_848xx_CMD_HDLR_STATUS, &val);
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
usleep_range(1000, 2000);
}
- if ((idx >= PHY84833_CMDHDLR_WAIT) ||
- (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
DP(NETIF_MSG_LINK, "FW cmd failed.\n");
return -EINVAL;
}
/* Gather returning data */
for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_DATA1 + idx,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
}
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_CMD_HDLR_STATUS,
+ MDIO_848xx_CMD_HDLR_STATUS,
PHY84833_STATUS_CMD_CLEAR_COMPLETE);
return 0;
}
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 fw_cmd,
+ u16 cmd_args[], int argc)
+{
+ struct bnx2x *bp = params->bp;
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+ (REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ link_attr_sync[params->port])) &
+ LINK_ATTR_84858)) {
+ return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ } else {
+ return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+ argc);
+ }
+}
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 pair_swap;
- u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+ u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
int status;
struct bnx2x *bp = params->bp;
@@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
/* Only the second argument is used for this command */
data[1] = (u16)pair_swap;
- status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
+ status = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_PAIR_SWAP, data,
+ PHY848xx_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
/* Prevent Phy from working in EEE and advertising it */
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE disable failed.\n");
return rc;
@@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 cmd_args = 1;
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE enable failed.\n");
return rc;
@@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u8 port, initialize = 1;
u16 val;
u32 actual_phy_selection;
- u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+ u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
int rc = 0;
usleep_range(1000, 2000);
@@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Wait for GPHY to come out of reset */
msleep(50);
- if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
- (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (!bnx2x_is_8483x_8485x(phy)) {
/* BCM84823 requires that XGXS links up first @ 10G for normal
* behavior.
*/
@@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
vars->line_speed = temp;
}
+ /* Check if this is actually BCM84858 */
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ u16 hw_rev;
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+ if (hw_rev == BCM84858_PHY_ID) {
+ params->link_attr_sync |= LINK_ATTR_84858;
+ bnx2x_update_link_attr(params, params->link_attr_sync);
+ }
+ }
+
+	/* Set dual-media configuration according to the multi-phy configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
- bnx2x_84833_pair_swap_cfg(phy, params, vars);
+ if (bnx2x_is_8483x_8485x(phy)) {
+ bnx2x_848xx_pair_swap_cfg(phy, params, vars);
/* Keep AutogrEEEn disabled. */
cmd_args[0] = 0x0;
cmd_args[1] = 0x0;
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
- rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args,
- PHY84833_CMDHDLR_MAX_ARGS);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params,
+ PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+ PHY848xx_CMDHDLR_MAX_ARGS);
if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
@@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ if (bnx2x_is_8483x_8485x(phy)) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read_and_write(bp, phy,
MDIO_CTL_DEVAD,
@@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+ if (bnx2x_is_8483x_8485x(phy))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = {
.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
};
+static const struct bnx2x_phy phy_84858 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
static const struct bnx2x_phy phy_54618se = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
.addr = 0xff,
@@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
*phy = phy_84834;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+ *phy = phy_84858;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
*phy = phy_54618se;
@@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
- (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
- (phy->ver_addr)) {
+ if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
/* Remove 100Mb link supported for BCM84833/4 when phy fw
* version lower than or equal to 1.39
*/
@@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
/* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d9cce4c3899b..b7d251108c19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,13 +1,15 @@
/* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Written by Yaniv Rosner
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c27af12314ed..f1d62d5dbaff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,8 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
+/* bnx2x_main.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -81,11 +83,11 @@
#define TX_TIMEOUT (5*HZ)
static char version[] =
- "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+ "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II "
+MODULE_DESCRIPTION("QLogic "
"BCM57710/57711/57711E/"
"57712/57712_MF/57800/57800_MF/57810/57810_MF/"
"57840/57840_MF Driver");
@@ -163,27 +165,27 @@ enum bnx2x_board_type {
static struct {
char *name;
} board_info[] = {
- [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
- [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
- [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
- [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
- [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
- [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
- [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
- [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
- [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
+ [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }
};
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -264,11 +266,14 @@ static const struct pci_device_id bnx2x_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
@@ -2492,7 +2497,7 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
else {
u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
- if (IS_MF_SI(bp)) {
+ if (IS_MF_PERCENT_BW(bp)) {
/* maxCfg in percents of linkspeed */
vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
} else /* SD modes */
@@ -2916,7 +2921,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
- if (IS_MF_UFP(bp)) {
+ if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
int func = BP_ABS_FUNC(bp);
u32 val;
@@ -2943,16 +2948,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
bp->mf_ov);
goto fail;
+ } else {
+ DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+ bp->mf_ov);
}
-
- DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
-
- bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
-
- return;
+ } else {
+ goto fail;
}
- /* not supported by SW yet */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+ return;
fail:
bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
}
@@ -3065,7 +3070,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
storm_memset_func_en(bp, p->func_id, 1);
/* spq */
- if (p->func_flgs & FUNC_FLG_SPQ) {
+ if (p->spq_active) {
storm_memset_spq_addr(bp, p->spq_map, p->func_id);
REG_WR(bp, XSEM_REG_FAST_MEMORY +
XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3281,7 +3286,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct event_ring_data eq_data = { {0} };
- u16 flags;
if (!CHIP_IS_E1x(bp)) {
/* reset IGU PF statistics: MSIX + ATTN */
@@ -3298,15 +3302,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
BP_FUNC(bp) : BP_VN(bp))*4, 0);
}
- /* function setup flags */
- flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
- /* This flag is relevant for E1x only.
- * E2 doesn't have a TPA configuration in a function level.
- */
- flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
- func_init.func_flgs = flags;
+ func_init.spq_active = true;
func_init.pf_id = BP_FUNC(bp);
func_init.func_id = BP_FUNC(bp);
func_init.spq_map = bp->spq_mapping;
@@ -3707,6 +3703,32 @@ out:
ethver, iscsiver, fcoever);
}
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+ u32 drv_ver;
+ u32 valid_dump;
+
+ if (!SHMEM2_HAS(bp, drv_info))
+ return;
+
+ /* Update Driver load time, possibly broken in y2038 */
+ SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
+
+ drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+ SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+ SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+ /* Check & notify On-Chip dump. */
+ valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+ if (valid_dump & FIRST_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+ if (valid_dump & SECOND_DUMP_VALID)
+ DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
u32 cmd_ok, cmd_fail;
@@ -5274,6 +5296,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+ vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+ break;
case BNX2X_FILTER_MCAST_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
/* This is only relevant for 57710 where multicast MACs are
@@ -5568,6 +5594,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_OPENING_WAIT4_PORT):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
cid = elem->message.data.eth_event.echo &
BNX2X_SWCID_MASK;
DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
@@ -5585,7 +5613,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+ DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
bnx2x_handle_classification_eqe(bp, elem);
break;
@@ -6173,6 +6201,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
break;
case BNX2X_RX_MODE_ALLMULTI:
__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6184,6 +6217,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
+ if (bp->accept_any_vlan) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+ }
+
break;
case BNX2X_RX_MODE_PROMISC:
/* According to definition of SI mode, iface in promisc mode
@@ -6204,18 +6242,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
else
__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
break;
default:
BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
return -EINVAL;
}
- /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
- if (rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
- }
-
return 0;
}
@@ -7429,6 +7464,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
} else
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+ if (SHMEM2_HAS(bp, netproc_fw_ver))
+ SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
return 0;
}
@@ -8406,6 +8444,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
return rc;
}
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ unsigned long *ramrod_flags)
+{
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
+
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ }
+
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+ if (rc == -EEXIST) {
+		/* Do not treat adding the same vlan as an error. */
+ DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+ rc = 0;
+ } else if (rc < 0) {
+ BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+ }
+
+ return rc;
+}
+
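A minimal usage sketch for the new helper, assuming the queue's vlan_obj
was initialized by bnx2x_init_vlan_mac_fp_objs() and the caller wants to
wait for completion (mirroring how the driver drives bnx2x_set_mac_one()):

	/* Sketch: synchronously add VLAN 100 on queue 0's vlan object */
	unsigned long ramrod_flags = 0;
	int rc;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	rc = bnx2x_set_vlan_one(bp, 100, &bp->sp_objs[0].vlan_obj,
				true /* add */, &ramrod_flags);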
int bnx2x_del_all_macs(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
int mac_type, bool wait_for_comp)
@@ -10002,6 +10076,91 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+ struct bnx2x_func_switch_update_params *switch_update_params;
+ struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
+
+ switch_update_params = &func_params.params.switch_update;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ /* Function parameters */
+ __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+ &switch_update_params->changes);
+ switch_update_params->vxlan_dst_port = port;
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc)
+ BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+ port, rc);
+ return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
+ if (!netif_running(bp->dev))
+ return;
+
+ if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
+ bp->vxlan_dst_port_count++;
+ return;
+ }
+
+ if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
+ DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+ return;
+ }
+
+ bp->vxlan_dst_port = port;
+ bp->vxlan_dst_port_count = 1;
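+ /* The switch_update ramrod itself is sent later from the sp_rtnl
+ * task, under rtnl lock.
+ */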
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
+
+ __bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+ if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
+ !IS_PF(bp)) {
+ DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+ return;
+ }
+ bp->vxlan_dst_port_count--;
+ if (bp->vxlan_dst_port_count)
+ return;
+
+ if (netif_running(bp->dev)) {
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+ } else {
+ bp->vxlan_dst_port = 0;
+ netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
+ }
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
+
+ __bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
static int bnx2x_close(struct net_device *dev);
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10010,6 +10169,9 @@ static int bnx2x_close(struct net_device *dev);
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+ u16 port;
+#endif
rtnl_lock();
@@ -10108,6 +10270,27 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
+#ifdef CONFIG_BNX2X_VXLAN
+ port = bp->vxlan_dst_port;
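+ /* Sample the port once up front: both the ADD and DEL handlers below
+ * report the currently configured port in their log messages.
+ */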
+ if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+ &bp->sp_rtnl_state)) {
+ if (!bnx2x_vxlan_port_update(bp, port))
+ netdev_info(bp->dev, "Added vxlan dest port %d", port);
+ else
+ bp->vxlan_dst_port = 0;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+ &bp->sp_rtnl_state)) {
+ if (!bnx2x_vxlan_port_update(bp, 0)) {
+ netdev_info(bp->dev,
+ "Deleted vxlan dest port %d\n", port);
+ bp->vxlan_dst_port = 0;
+ vxlan_get_rx_port(bp->dev);
+ }
+ }
+#endif
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -11678,7 +11861,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
- int vn;
+ int vn, mfw_vn;
u32 val = 0, val2 = 0;
int rc = 0;
@@ -11768,6 +11951,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_mode = 0;
bp->mf_sub_mode = 0;
vn = BP_VN(bp);
+ mfw_vn = BP_FW_MB_IDX(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -11824,6 +12008,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_sub_mode = SUB_MF_MODE_BD;
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp,
+ func_mf_config[func].config);
+
+ if (SHMEM2_HAS(bp, mtu_size)) {
+ int mtu_idx = BP_FW_MB_IDX(bp);
+ u16 mtu_size;
+ u32 mtu;
+
+ mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+ mtu_size = (u16)mtu;
+ DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+ mtu_size, mtu);
+
+ /* if valid: update device mtu */
+ if (((mtu_size + ETH_HLEN) >=
+ ETH_MIN_PACKET_SIZE) &&
+ (mtu_size <=
+ ETH_MAX_JUMBO_PACKET_SIZE))
+ bp->dev->mtu = mtu_size;
+ }
+ break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
bp->mf_mode = MULTI_FUNCTION_SD;
bp->mf_sub_mode = SUB_MF_MODE_UFP;
@@ -11871,9 +12080,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
func, bp->mf_ov, bp->mf_ov);
- } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+ } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+ (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
dev_err(&bp->pdev->dev,
- "Unexpected - no valid MF OV for func %d in UFP mode\n",
+ "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
func);
bp->path_has_ovlan = true;
} else {
@@ -12078,6 +12288,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->drv_info_mutex);
sema_init(&bp->stats_lock, 1);
bp->drv_info_mng_owner = false;
+ INIT_LIST_HEAD(&bp->vlan_reg);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12278,6 +12489,12 @@ static int bnx2x_open(struct net_device *dev)
rc = bnx2x_nic_load(bp, LOAD_OPEN);
if (rc)
return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+ if (IS_PF(bp))
+ vxlan_get_rx_port(dev);
+#endif
+
return 0;
}
@@ -12596,6 +12813,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
return vxlan_features_check(skb, features);
}
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+ int rc;
+
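+ /* A PF owns the classification objects and can post the ramrod
+ * directly; a VF must request the change over the VF-PF channel.
+ */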
+ if (IS_PF(bp)) {
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+ add, &ramrod_flags);
+ } else {
+ rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+ }
+
+ return rc;
+}
+
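+/* Re-apply every stack-registered VLAN to the HW filters; entries that
+ * fail to configure are downgraded to vlan->hw = false.
+ */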
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+ int rc = 0;
+
+ if (!bp->vlan_cnt) {
+ DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+ return 0;
+ }
+
+ list_for_each_entry(vlan, &bp->vlan_reg, link) {
+ /* Prepare for cleanup in case of errors */
+ if (rc) {
+ vlan->hw = false;
+ continue;
+ }
+
+ if (!vlan->hw)
+ continue;
+
+ DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+ rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+ if (rc) {
+ BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+ vlan->hw = false;
+ rc = -EINVAL;
+ continue;
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+ bool hw = false;
+ int rc = 0;
+
+ if (!netif_running(bp->dev)) {
+ DP(NETIF_MSG_IFUP,
+ "Ignoring VLAN configuration the interface is down\n");
+ return -EFAULT;
+ }
+
+ DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+ vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ bp->vlan_cnt++;
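+ /* Out of HW filtering credits: fall back to accepting any VLAN in the
+ * rx-mode configuration instead of failing the request.
+ */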
+ if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+ DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+ bp->accept_any_vlan = true;
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ } else if (bp->vlan_cnt <= bp->vlan_credit) {
+ rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+ hw = true;
+ }
+
+ vlan->vid = vid;
+ vlan->hw = hw;
+
+ if (!rc) {
+ list_add(&vlan->link, &bp->vlan_reg);
+ } else {
+ bp->vlan_cnt--;
+ kfree(vlan);
+ }
+
+ DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+ return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_vlan_entry *vlan;
+ int rc = 0;
+
+ if (!netif_running(bp->dev)) {
+ DP(NETIF_MSG_IFUP,
+ "Ignoring VLAN configuration the interface is down\n");
+ return -EFAULT;
+ }
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+ if (!bp->vlan_cnt) {
+ BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ if (vlan->vid == vid)
+ break;
+
+ /* Iterator reaching the list head means the vid was not found */
+ if (&vlan->link == &bp->vlan_reg) {
+ BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+ return -EINVAL;
+ }
+
+ if (vlan->hw)
+ rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+ list_del(&vlan->link);
+ kfree(vlan);
+
+ bp->vlan_cnt--;
+
+ if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+ /* Configure all non-configured entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link) {
+ if (vlan->hw)
+ continue;
+
+ rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+ if (rc) {
+ BNX2X_ERR("Unable to config VLAN %d\n",
+ vlan->vid);
+ continue;
+ }
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+ vlan->vid);
+ vlan->hw = true;
+ }
+ DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+ bp->accept_any_vlan = false;
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+
+ DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+ return rc;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12609,6 +12989,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fix_features = bnx2x_fix_features,
.ndo_set_features = bnx2x_set_features,
.ndo_tx_timeout = bnx2x_tx_timeout,
+ .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
@@ -12628,6 +13010,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+ .ndo_add_vxlan_port = bnx2x_add_vxlan_port,
+ .ndo_del_vxlan_port = bnx2x_del_vxlan_port,
+#endif
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -12819,6 +13205,18 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+ /* VF with OLD Hypervisor or old PF do not support filtering */
+ if (IS_PF(bp)) {
+ if (CHIP_IS_E1x(bp))
+ bp->accept_any_vlan = true;
+ else
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+ } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+ }
+
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
@@ -13561,6 +13959,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bnx2x_register_phc(bp);
+ if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
return 0;
init_one_exit:
@@ -13623,6 +14024,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
/* Power on: we can't let PCI layer write to us while we are in D3 */
if (IS_PF(bp)) {
bnx2x_set_power_state(bp, PCI_D0);
+ bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
/* Set endianness registers to reset values in case the next driver
* boots in a different endianness environment.
@@ -14371,6 +14773,90 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
rc = -EINVAL;
}
+ /* For storage-only interfaces, change driver state */
+ if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+ switch (ctl->drv_state) {
+ case DRV_NOP:
+ break;
+ case DRV_ACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_ACTIVE);
+ break;
+ case DRV_INACTIVE:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_DISABLED);
+ break;
+ case DRV_UNLOADED:
+ bnx2x_set_os_driver_state(bp,
+ OS_DRIVER_STATE_NOT_LOADED);
+ break;
+ default:
+ BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
+ }
+ }
+
+ return rc;
+}
+
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+ struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bdn_fc_npiv_tbl *tbl = NULL;
+ u32 offset, entries;
+ int rc = -EINVAL;
+ int i;
+
+ if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+ goto out;
+
+ DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+ tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl) {
+ BNX2X_ERR("Failed to allocate fc_npiv table\n");
+ goto out;
+ }
+
+ offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+ DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+ /* Read the table contents from nvram */
+ if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+ BNX2X_ERR("Failed to read FC-NPIV table\n");
+ goto out;
+ }
+
+ /* Since bnx2x_nvram_read() returns data in be32, we need to convert
+ * the number of entries back to cpu endianness.
+ */
+ entries = tbl->fc_npiv_cfg.num_of_npiv;
+ entries = (__force u32)be32_to_cpu((__force __be32)entries);
+ tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+ if (!tbl->fc_npiv_cfg.num_of_npiv) {
+ DP(BNX2X_MSG_MCP,
+ "No FC-NPIV table [valid, simply not present]\n");
+ goto out;
+ } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+ BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ goto out;
+ } else {
+ DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+ tbl->fc_npiv_cfg.num_of_npiv);
+ }
+
+ /* Copy the data into cnic-provided struct */
+ cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+ for (i = 0; i < cnic_tbl->count; i++) {
+ memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+ memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+ }
+
+ rc = 0;
+out:
+ kfree(tbl);
return rc;
}
@@ -14516,6 +15002,7 @@ static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
cp->drv_ctl = bnx2x_drv_ctl;
+ cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index caf1aef651eb..a91ccbf36345 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,8 @@
-/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
*
* Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 49d511092c82..4dead49bd5cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,8 @@
-/* bnx2x_reg.h: Broadcom Everest network driver.
+/* bnx2x_reg.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2137,6 +2139,10 @@
/* [RW 1] When this bit is set; the LLH will expect all packets to be with
e1hov */
#define NIG_REG_LLH_E1HOV_MODE 0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028
/* [RW 1] When this bit is set; the LLH will classify the packet before
sending it to the BRB or calculating WoL on it. */
#define NIG_REG_LLH_MF_MODE 0x16024
@@ -2953,7 +2959,12 @@
#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
-#define PB_REG_CONTROL 0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0 0x15c06c
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
/* [R 2] Interrupt register #0 read */
@@ -3372,6 +3383,12 @@
#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
/* [R 8] debug only: TSDM current credit. Transaction based. */
#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0 0x401a8
#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
@@ -7240,6 +7257,9 @@ The other bits are reserved and should be zero */
#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
#define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40
#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB 0xffe2
+#define BCM84858_PHY_ID 0x600d
+#define MDIO_AN_REG_848xx_ID_LSB 0xffe3
#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
@@ -7283,31 +7303,31 @@ The other bits are reserved and should be zero */
#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
+/* These are mailbox register set used by 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
-#define PHY84833_CMD_GET_EEE_MODE 0x8008
-#define PHY84833_CMD_SET_EEE_MODE 0x8009
-/* Mailbox status set used by 84833. */
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001
+#define PHY848xx_CMD_GET_EEE_MODE 0x8008
+#define PHY848xx_CMD_SET_EEE_MODE 0x8009
+/* Mailbox status set used by 84833 only */
#define PHY84833_STATUS_CMD_RECEIVED 0x0001
#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
@@ -7318,6 +7338,13 @@ The other bits are reserved and should be zero */
#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED 0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb
+
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 4ad415ac8cfe..ff702a707a91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,15 +1,17 @@
-/* bnx2x_sp.c: Broadcom Everest network driver.
+/* bnx2x_sp.c: QLogic Everest network driver.
*
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -355,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1);
}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
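+ /* A VLAN-MAC pair consumes one credit from each pool; take the MAC
+ * credit first and return it if no VLAN credit is available.
+ */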
+ if (!mp->get(mp, 1))
+ return false;
+
+ if (!vp->get(vp, 1)) {
+ mp->put(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -383,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1);
}
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->put(mp, 1))
+ return false;
+
+ if (!vp->put(vp, 1)) {
+ mp->get(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
/**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
*
@@ -636,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0;
}
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return -EEXIST;
+
+ return 0;
+}
+
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp,
@@ -670,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL;
}
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
+ return pos;
+
+ return NULL;
+}
+
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
@@ -1036,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt);
}
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+ bool add = (cmd == BNX2X_VLAN_MAC_ADD);
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+ u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+ u16 inner_mac;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set VLAN and MAC themselves */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ target_obj = elem->cmd_data.vlan_mac.target_obj;
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+ true, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set a VLAN itself */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+ }
+
+ /* Set the ramrod data header */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - set a single VLAN-MAC pair CAM entry
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @elem: bnx2x_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /* 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD);
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+ elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+ ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
/**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
*
@@ -1135,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL;
}
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_mac_ramrod_data *data =
+ &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd ==
+ elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
/**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
*
@@ -2042,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
}
}
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj =
+ (union bnx2x_qable_obj *)vlan_mac_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, vlans_pool);
+
+ /* CAM pool handling */
+ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+ vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+ /* CAM offset is relevant for 57710 and 57711 chips only which have a
+ * single CAM for both MACs and VLAN-MAC pairs. So the offset
+ * will be taken from MACs' pool object only.
+ */
+ vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1(bp)) {
+ BNX2X_ERR("Do not support chips others than E2\n");
+ BUG();
+ } else if (CHIP_IS_E1H(bp)) {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move_always_err;
+ vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ } else {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move;
+ vlan_mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue,
+ CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ }
+}
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3854,8 +4101,8 @@ static bool bnx2x_credit_pool_get_entry_always_true(
* If credit is negative pool operations will always succeed (unlimited pool).
*
*/
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
- int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit)
{
/* Zero the object first */
memset(p, 0, sizeof(*p));
@@ -3934,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
/* CAM credit is equally divided between all active functions
* on the PATH.
*/
- if ((func_num > 0)) {
+ if (func_num > 0) {
if (!CHIP_REV_IS_SLOW(bp))
- cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
else
cam_sz = BNX2X_CAM_SIZE_EMUL;
@@ -3966,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
* on the PATH.
*/
if (func_num > 0) {
- int credit = MAX_VLAN_CREDIT_E2 / func_num;
- bnx2x_init_credit_pool(p, func_id * credit, credit);
+ int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+ bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
} else
/* this should never happen! Block VLAN operations. */
bnx2x_init_credit_pool(p, 0, 0);
@@ -4060,13 +4308,27 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
- if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
- caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+ caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
/* RSS keys */
if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
- memcpy(&data->rss_key[0], &p->rss_key[0],
- sizeof(data->rss_key));
+ u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
+ const u8 *src = (const u8 *)p->rss_key;
+ int i;
+
+ /* Apparently, bnx2x reads this array in reverse order
+ * We need to byte swap rss_key to comply with Toeplitz specs.
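+ * (e.g. p->rss_key[0] ends up in the last byte of data->rss_key).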
+ */
+ for (i = 0; i < sizeof(data->rss_key); i++)
+ *--dst = *src++;
+
caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
@@ -5669,10 +5931,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
- rdata->tunnel_mode = start_params->tunnel_mode;
- rdata->gre_tunnel_type = start_params->gre_tunnel_type;
- rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
- rdata->vxlan_dst_port = cpu_to_le16(4789);
+
+ rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
+ rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
+ rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
+ rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+ rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
+ rdata->inner_rss = start_params->inner_rss;
+
rdata->sd_accept_mf_clss_fail = start_params->class_fail;
if (start_params->class_fail_ethtype) {
rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
@@ -5690,6 +5956,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
cpu_to_le16(0x8100);
rdata->no_added_tags = start_params->no_added_tags;
+
+ rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+ if (rdata->c2s_pri_tt_valid) {
+ memcpy(rdata->c2s_pri_trans_table.val,
+ start_params->c2s_pri,
+ MAX_VLAN_PRIORITIES);
+ rdata->c2s_pri_default = start_params->c2s_pri_default;
+ }
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
@@ -5750,15 +6024,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes)) {
rdata->update_tunn_cfg_flg = 1;
- if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ &switch_update_params->changes))
+ rdata->inner_clss_l2gre = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ &switch_update_params->changes))
+ rdata->inner_clss_vxlan = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
&switch_update_params->changes))
- rdata->tunn_clss_en = 1;
- if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+ rdata->inner_clss_l2geneve = 1;
+ if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
&switch_update_params->changes))
- rdata->inner_gre_rss_en = 1;
- rdata->tunnel_mode = switch_update_params->tunnel_mode;
- rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
- rdata->vxlan_dst_port = cpu_to_le16(4789);
+ rdata->inner_rss = 1;
+ rdata->vxlan_dst_port =
+ cpu_to_le16(switch_update_params->vxlan_dst_port);
+ rdata->geneve_dst_port =
+ cpu_to_le16(switch_update_params->geneve_dst_port);
}
rdata->echo = SWITCH_UPDATE;
@@ -5885,6 +6166,8 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+ rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 86baecb7c60c..4048fc594cce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,15 +1,17 @@
-/* bnx2x_sp.h: Broadcom Everest network driver.
+/* bnx2x_sp.h: QLogic Everest network driver.
*
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -711,7 +713,10 @@ enum {
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
BNX2X_RSS_IPV6_UDP,
- BNX2X_RSS_GRE_INNER_HDRS,
+
+ BNX2X_RSS_IPV4_VXLAN,
+ BNX2X_RSS_IPV6_VXLAN,
+ BNX2X_RSS_TUNN_INNER_HDRS,
};
struct bnx2x_config_rss_params {
@@ -1105,8 +1110,10 @@ enum {
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
- BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
- BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+ BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+ BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
};
/* Allowed Function states */
@@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
- /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
- u8 tunnel_mode;
+ /* UDP dest port for VXLAN */
+ u16 vxlan_dst_port;
- /* tunneling classification enablement */
- u8 tunn_clss_en;
+ /* UDP dest port for Geneve */
+ u16 geneve_dst_port;
- /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
- u8 gre_tunnel_type;
+ /* Enable inner Rx classifications for L2GRE packets */
+ u8 inner_clss_l2gre;
- /* Enables Inner GRE RSS on the function, depends on the client RSS
- * capailities
- */
- u8 inner_gre_rss_en;
+ /* Enable inner Rx classifications for L2-Geneve packets */
+ u8 inner_clss_l2geneve;
+
+ /* Enable inner Rx classification for vxlan packets */
+ u8 inner_clss_vxlan;
+
+ /* Enable RSS according to inner header */
+ u8 inner_rss;
/* Allows accepting of packets failing MF classification, possibly
* only matching a given ethertype
@@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params {
/* Prevent inner vlans from being added by FW */
u8 no_added_tags;
+
+ /* Inner-to-Outer vlan priority mapping */
+ u8 c2s_pri[MAX_VLAN_PRIORITIES];
+ u8 c2s_pri_default;
+ u8 c2s_pri_valid;
};
struct bnx2x_func_switch_update_params {
@@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params {
u16 vlan;
u16 vlan_eth_type;
u8 vlan_force_prio;
- u8 tunnel_mode;
- u8 gre_tunnel_type;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
struct bnx2x_func_afex_update_params {
@@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params {
u8 dcb_enabled;
u8 dcb_version;
u8 dont_add_pri_0_en;
+ u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
};
struct bnx2x_func_set_timesync_params {
@@ -1396,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool);
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1466,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *p, u8 func_id,
u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit);
/****************** RSS CONFIGURATION ****************/
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1493,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
+#define PF_MAC_CREDIT_E2(bp, func_num) \
+ ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num) \
+ ((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+ func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
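+
+/* Worked example with illustrative numbers only: a path-wide pool of 272
+ * credits, 2 PFs and 8 VFs on the path (4 per PF) with a per-VF count of
+ * 1 gives each PF (272 - 8 * 1) / 2 + 4 * 1 = 136 credits.
+ */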
+
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f67348d16966..9d027348cd09 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.c: Broadcom Everest network driver.
+/* bnx2x_sriov.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -195,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
setup_p->gen_params.fp_hsi = vf->fp_hsi;
- /* Setup-op pause params:
- * Nothing to do, the pause thresholds are set by default to 0 which
- * effectively turns off the feature for this queue. We don't want
- * one queue (VF) to interfering with another queue (another VF)
- */
- if (vf->cfg_flags & VF_CFG_FW_FC)
- BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
- vf->abs_vfid);
/* Setup-op flags:
* collect statistics, zero statistics, local-switching, security,
* OV for Flex10, RSS and MCAST for leading
@@ -358,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
}
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
- int qid, bool drv_only, bool mac)
+ int qid, bool drv_only, int type)
{
struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc;
DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
- mac ? "MACs" : "VLANs");
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
- if (mac) {
+ if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ } else if (type == BNX2X_VF_FILTER_MAC) {
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
} else {
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
}
ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -391,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
&ramrod.ramrod_flags);
if (rc) {
BNX2X_ERR("Failed to delete all %s\n",
- mac ? "MACs" : "VLANs");
+ (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+ (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
return rc;
}
- /* Clear the vlan counters */
- if (!mac)
- atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
return 0;
}
@@ -412,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
vf->abs_vfid, filter->add ? "Adding" : "Deleting",
- filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
/* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
- if (filter->type == BNX2X_VF_FILTER_VLAN) {
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod.user_req.vlan_mac_flags);
+ if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+ ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+ ramrod.user_req.u.vlan.vlan = filter->vid;
+ memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+ set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
ramrod.user_req.u.vlan.vlan = filter->vid;
} else {
@@ -429,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
BNX2X_VLAN_MAC_DEL;
- /* Verify there are available vlan credits */
- if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
- (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
- vf_vlan_rules_cnt(vf))) {
- BNX2X_ERR("No credits for vlan [%d >= %d]\n",
- atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
- vf_vlan_rules_cnt(vf));
- return -ENOMEM;
- }
-
set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -450,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
if (rc && rc != -EEXIST) {
BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete",
- filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
- "VLAN");
+ (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+ "VLAN-MAC" :
+ (filter->type == BNX2X_VF_FILTER_MAC) ?
+ "MAC" : "VLAN");
return rc;
}
- /* Update the vlan counters */
- if (filter->type == BNX2X_VF_FILTER_VLAN)
- bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
- &bnx2x_vfq(vf, qid, vlan_count));
-
return 0;
}
@@ -511,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
if (rc)
goto op_err;
- /* Configure vlan0 for leading queue */
- if (!qid) {
- struct bnx2x_vf_mac_vlan_filter filter;
-
- memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
- filter.type = BNX2X_VF_FILTER_VLAN;
- filter.add = true;
- filter.vid = 0;
- rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
- if (rc)
- goto op_err;
- }
-
/* Schedule the configuration of any pending vlan filters */
- vf->cfg_flags |= VF_CFG_VLAN;
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_MSG_IOV);
return 0;
@@ -544,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* If needed, clean the filtering data base */
if ((qid == LEADING_IDX) &&
bnx2x_validate_vf_sp_objs(bp, vf, false)) {
- rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_VLAN);
if (rc)
goto op_err;
- rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+ BNX2X_VF_FILTER_MAC);
if (rc)
goto op_err;
}
@@ -680,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
/* Remove filtering if feasible */
if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
- false, false);
+ false,
+ BNX2X_VF_FILTER_VLAN_MAC);
+ if (rc)
+ goto op_err;
+ rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+ false,
+ BNX2X_VF_FILTER_VLAN);
if (rc)
goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
- false, true);
+ false,
+ BNX2X_VF_FILTER_MAC);
if (rc)
goto op_err;
rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -765,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
- if (vf->cfg_flags & VF_CFG_INT_SIMD)
- val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -845,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
return 0;
}
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- int new)
-{
- int num = vf_vlan_rules_cnt(vf);
- int diff = new - num;
- bool rc = true;
-
- DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
- vf->abs_vfid, new, num);
-
- if (diff > 0)
- rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
- else if (diff < 0)
- rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
- if (rc)
- vf_vlan_rules_cnt(vf) = new;
- else
- DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
- vf->abs_vfid);
-}
-
/* must be called after the number of PF queues and the number of VFs are
* both known
*/
@@ -875,21 +833,13 @@ static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct vf_pf_resc_request *resc = &vf->alloc_resc;
- u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculations for macs (just yet) */
- resc->num_mac_filters = 1;
-
- /* divvy up vlan rules */
- bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
- vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
- vlan_count = 1 << ilog2(vlan_count);
- bnx2x_iov_re_set_vlan_filters(bp, vf,
- vlan_count / BNX2X_NR_VIRTFN(bp));
+ resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+ resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
/* no real limitation */
resc->num_mc_filters = 0;
@@ -1338,6 +1288,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
mutex_init(&bp->vfdb->bulletin_mutex);
+ if (SHMEM2_HAS(bp, sriov_switch_mode))
+ SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
return 0;
failed:
DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1620,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
vf->filter_state = 0;
vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+ bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+ vf_vlan_rules_cnt(vf));
+ bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+ vf_mac_rules_cnt(vf));
+
/* init mcast object - This object will be re-initialized
* during VF-ACQUIRE with the proper cl_id and cid.
* It needs to be initialized here so that it can be safely
@@ -2032,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
- /* Save a vlan filter for the Hypervisor */
return ((req_resc->num_rxqs <= rxq_cnt) &&
(req_resc->num_txqs <= txq_cnt) &&
(req_resc->num_sbs <= vf_sb_count(vf)) &&
(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
- (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+ (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
/* CORE VF API */
@@ -2091,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_sb_count(vf) = resc->num_sbs;
vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
- if (resc->num_mac_filters)
- vf_mac_rules_cnt(vf) = resc->num_mac_filters;
- /* Add an additional vlan filter credit for the hypervisor */
- bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
DP(BNX2X_MSG_IOV,
"Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
vf_sb_count(vf), vf_rxq_count(vf),
vf_txq_count(vf), vf_mac_rules_cnt(vf),
- vf_vlan_rules_visible_cnt(vf));
+ vf_vlan_rules_cnt(vf));
/* Initialize the queues */
if (!vf->vfqs) {
@@ -2133,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
struct bnx2x_func_init_params func_init = {0};
- u16 flags = 0;
int i;
/* the sb resources are initialized at this point, do the
@@ -2160,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
/* reset IGU VF statistics: MSIX */
REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
- /* vf init */
- if (vf->cfg_flags & VF_CFG_STATS)
- flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
- if (vf->cfg_flags & VF_CFG_TPA)
- flags |= FUNC_FLG_TPA;
-
- if (is_vf_multi(vf))
- flags |= FUNC_FLG_RSS;
-
/* function setup */
- func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp);
func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
- func_init.fw_stat_map = vf->fw_stat_map;
- func_init.spq_map = vf->spq_map;
- func_init.spq_prod = 0;
bnx2x_func_init(bp, &func_init);
/* Enable the vf */
@@ -2589,8 +2527,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
for_each_vf(bp, vfidx) {
- bulletin = BP_VF_BULLETIN(bp, vfidx);
- if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
}
}
@@ -2808,20 +2746,58 @@ out:
return rc;
}
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, bool accept)
+{
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+ unsigned long accept_flags;
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (accept)
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u16 vlan, bool add)
{
- struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_update_params *update_params;
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+ : BNX2X_VLAN_MAC_DEL;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
struct pf_vf_bulletin_content *bulletin = NULL;
- struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_mac_obj *vlan_obj;
unsigned long vlan_mac_flags = 0;
unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL;
- unsigned long accept_flags;
- int rc;
+ int i, rc;
if (vlan > 4095) {
BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2850,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc)
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
mutex_unlock(&bp->vfdb->bulletin_mutex);
/* is vf initialized and queue set up? */
@@ -2876,84 +2856,76 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
goto out;
}
- /* need to remove/add the VF's accept_any_vlan bit */
- accept_flags = bnx2x_leading_vfq(vf, accept_flags);
- if (vlan)
- clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
- else
- set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
- bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
- accept_flags);
- bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
- bnx2x_config_rx_mode(bp, &rx_ramrod);
+ /* Clear accept_any_vlan when the HV forces a vlan; otherwise set it
+ * only if the VF cannot do its own vlan filtering.
+ */
+ if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+ bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
- /* configure the new vlan to device */
- memset(&ramrod_param, 0, sizeof(ramrod_param));
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
- &ramrod_param.user_req.vlan_mac_flags);
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- rc = -EINVAL;
+ rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+ if (rc)
goto out;
- }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrods to configure default vlan and
+ * silent vlan removal
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
- &update_params->update_flags);
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ for_each_vfq(vf, i) {
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+ /* validate the Q is UP */
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ continue;
+
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
&update_params->update_flags);
- update_params->def_vlan = vlan;
- update_params->silent_removal_value =
- vlan & VLAN_VID_MASK;
- update_params->silent_removal_mask = VLAN_VID_MASK;
- }
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- goto out;
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+ i);
+ goto out;
+ }
}
-
-
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured the Vlan before vf was
- * up and we were called because the VF came up later
- */
out:
- vf->cfg_flags &= ~VF_CFG_VLAN;
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ if (!rc)
+ DP(BNX2X_MSG_IOV,
+ "updated VF[%d] vlan configuration (vlan = %d)\n",
+ vfidx, vlan);
+
return rc;
}
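For reference, the per-queue logic above reduces to one rule: vlan == 0 leaves VF traffic untagged and incoming tags untouched, while any other value becomes the queue's default tag with silent removal armed. A minimal userspace sketch of that rule (hypothetical struct and field names, not the driver's types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff

/* hypothetical mirror of the update_params fields set above */
struct q_vlan_update {
	bool def_vlan_en;
	bool silent_rem;
	uint16_t def_vlan;
	uint16_t silent_removal_value;
	uint16_t silent_removal_mask;
};

static struct q_vlan_update build_update(uint16_t vlan)
{
	struct q_vlan_update u = {0};

	if (vlan) {
		/* tag VF traffic and silently strip the tag on rx */
		u.def_vlan_en = true;
		u.silent_rem = true;
		u.def_vlan = vlan;
		u.silent_removal_value = vlan & VLAN_VID_MASK;
		u.silent_removal_mask = VLAN_VID_MASK;
	}
	return u;
}

int main(void)
{
	struct q_vlan_update u = build_update(100);

	printf("def_vlan_en=%d def_vlan=%u mask=0x%x\n",
	       u.def_vlan_en, (unsigned)u.def_vlan,
	       (unsigned)u.silent_removal_mask);
	return 0;
}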
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 66ee62a0401a..670a581ffabc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.h: Broadcom Everest network driver.
+/* bnx2x_sriov.h: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -75,7 +77,10 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
- atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+
+ /* VLAN-MACs object */
+ struct bnx2x_vlan_mac_obj vlan_mac_obj;
+
unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
@@ -103,8 +108,10 @@ struct bnx2x_virtf;
struct bnx2x_vf_mac_vlan_filter {
int type;
-#define BNX2X_VF_FILTER_MAC 1
-#define BNX2X_VF_FILTER_VLAN 2
+#define BNX2X_VF_FILTER_MAC BIT(0)
+#define BNX2X_VF_FILTER_VLAN BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+ (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /* shortcut */
bool add;
u8 *mac;
@@ -119,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters {
/* vf context */
struct bnx2x_virtf {
u16 cfg_flags;
-#define VF_CFG_STATS 0x0001
-#define VF_CFG_FW_FC 0x0002
-#define VF_CFG_TPA 0x0004
-#define VF_CFG_INT_SIMD 0x0008
-#define VF_CACHE_LINE 0x0010
-#define VF_CFG_VLAN 0x0020
-#define VF_CFG_STATS_COALESCE 0x0040
-#define VF_CFG_EXT_BULLETIN 0x0080
+#define VF_CFG_STATS_COALESCE 0x1
+#define VF_CFG_EXT_BULLETIN 0x2
+#define VF_CFG_VLAN_FILTER 0x4
u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO
* IFLA_VF_LINK_STATE_ENABLE
* IFLA_VF_LINK_STATE_DISABLE
@@ -140,9 +142,8 @@ struct bnx2x_virtf {
bool flr_clnup_stage; /* true during flr cleanup */
/* dma */
- dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ dma_addr_t fw_stat_map;
u16 stats_stride;
- dma_addr_t spq_map;
dma_addr_t bulletin_map;
/* Allocated resources counters. Before the VF is acquired, the
@@ -163,8 +164,6 @@ struct bnx2x_virtf {
#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
- /* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
u8 sb_count; /* actual number of SBs */
u8 igu_base_id; /* base igu status block id */
@@ -207,6 +206,9 @@ struct bnx2x_virtf {
enum channel_tlvs op_current;
u8 fp_hsi;
+
+ struct bnx2x_credit_pool_obj vf_vlans_pool;
+ struct bnx2x_credit_pool_obj vf_macs_pool;
};
#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
@@ -230,6 +232,12 @@ struct bnx2x_virtf {
#define FW_VF_HANDLE(abs_vfid) \
(abs_vfid + FW_PF_MAX_HANDLE)
+#define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \
+ : 0)
+#define VF_MAC_CREDIT_CNT 1
+#define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */
+
/* locking and unlocking the channel mutex */
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv);
@@ -274,6 +282,10 @@ struct bnx2x_vf_sp {
} vlan_rdata;
union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_mac_rdata;
+
+ union {
struct eth_filter_rules_ramrod_data e2;
} rx_mode_rdata;
@@ -536,8 +548,14 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
#else /* CONFIG_BNX2X_SRIOV */
+#define GET_NUM_VFS_PER_PATH(bp) 0
+#define GET_NUM_VFS_PER_PF(bp) 0
+#define VF_MAC_CREDIT_CNT 0
+#define VF_VLAN_CREDIT_CNT 0
+
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -604,5 +622,7 @@ struct pf_vf_bulletin_content;
static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
bool support_long) {}
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) { return 0; }
+
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
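A minimal sketch of the per-path credit arithmetic implied by the defines above, assuming the PF sizes its VF classification pools as credits-per-VF times the VF count (the multiplication itself is not shown in this hunk):

#include <stdio.h>

#define VF_MAC_CREDIT_CNT  1
#define VF_VLAN_CREDIT_CNT 2	/* VLAN0 + 'real' VLAN, as above */
#define NUM_VFS_PER_PATH   64	/* max possible value, as above */

int main(void)
{
	printf("MAC credits per path:  %d\n",
	       VF_MAC_CREDIT_CNT * NUM_VFS_PER_PATH);
	printf("VLAN credits per path: %d\n",
	       VF_VLAN_CREDIT_CNT * NUM_VFS_PER_PATH);
	return 0;
}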
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 69d699f0730a..7e0919aa450e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,8 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
+/* bnx2x_stats.c: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 965539a9dabe..b2644ed13d06 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,8 @@
-/* bnx2x_stats.h: Broadcom Everest network driver.
+/* bnx2x_stats.h: QLogic Everest network driver.
*
* Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 06b8c0d8fd3b..1374e5394a79 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1,15 +1,17 @@
-/* bnx2x_vfpf.c: Broadcom Everest network driver.
+/* bnx2x_vfpf.c: QLogic Everest network driver.
*
* Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
* consent.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -245,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
req->resc_request.num_sbs = bp->igu_sb_cnt;
req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+ req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
/* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -255,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* Bulletin support for bulletin board with length > legacy length */
req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+ /* vlan filtering is supported */
+ req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
/* add list termination tlv */
bnx2x_add_tlv(bp, req,
@@ -373,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
@@ -546,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_MAC_PENDING,
&vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
+ &vf->vf_macs_pool);
/* vlan */
bnx2x_init_vlan_obj(bp, &q->vlan_obj,
cl_id, q->cid, func_id,
@@ -555,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_VLAN_PENDING,
&vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
+ &vf->vf_vlans_pool);
+ /* vlan-mac */
+ bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &vf->vf_macs_pool,
+ &vf->vf_vlans_pool);
/* mcast */
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
q->cid, func_id, func_id,
@@ -723,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
if (set)
- req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
/* sample bulletin board for new mac */
bnx2x_sample_bulletin(bp);
@@ -911,6 +927,67 @@ out:
return 0;
}
+/* request pf to add or remove a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc = 0;
+
+ if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+ DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+ return 0;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = vf_qid;
+ req->n_mac_vlan_filters = 1;
+
+ req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+ if (add)
+ req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+ /* sample bulletin board for hypervisor vlan */
+ bnx2x_sample_bulletin(bp);
+
+ if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+ BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ req->filters[0].vlan_tag = vid;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+ vid);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
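One subtlety in bnx2x_vfpf_update_vlan above: the shadow-bulletin test relies on << binding tighter than &, so valid_bitmap & 1 << VLAN_VALID parses as valid_bitmap & (1 << VLAN_VALID). A self-contained check, using an illustrative bit position for VLAN_VALID:

#include <assert.h>
#include <stdint.h>

#define VLAN_VALID 1	/* illustrative bit position, not the driver's enum */

int main(void)
{
	uint64_t valid_bitmap = 1 << VLAN_VALID;

	/* << binds tighter than &, so both spellings test the same bit */
	assert((valid_bitmap & 1 << VLAN_VALID) ==
	       (valid_bitmap & (1 << VLAN_VALID)));
	return 0;
}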
+
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
int mode = bp->rx_mode;
@@ -934,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ if (mode == BNX2X_RX_MODE_PROMISC)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
}
+ if (bp->accept_any_vlan)
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
req->vf_qid = 0;
@@ -1188,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
PFVF_CAP_TPA |
- PFVF_CAP_TPA_UPDATE);
+ PFVF_CAP_TPA_UPDATE |
+ PFVF_CAP_VLAN_FILTER);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -1203,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_max_queue_cnt(bp, vf);
resc->num_sbs = vf_sb_count(vf);
resc->num_mac_filters = vf_mac_rules_cnt(vf);
- resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
resc->num_mc_filters = 0;
if (status == PFVF_STATUS_SUCCESS) {
@@ -1370,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
}
+ if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+ DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+ vf->abs_vfid);
+ vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+ } else {
+ vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+ }
+
out:
/* response */
bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1382,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
int rc;
/* record ghost addresses from vf message */
- vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride;
rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1578,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
if ((msg_filter->flags & type_flag) != type_flag)
continue;
- if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+ memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+ if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac;
- fl->filters[j].type = BNX2X_VF_FILTER_MAC;
- } else {
+ fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
+ }
+ if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
fl->filters[j].vid = msg_filter->vlan_tag;
- fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+ fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
}
- fl->filters[j].add =
- (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
- true : false;
+ fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
fl->count++;
+ j++;
}
if (!fl->count)
kfree(fl);
@@ -1598,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
return 0;
}
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+ u32 flags)
+{
+ int i, cnt = 0;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ if ((filters->filters[i].flags & flags) == flags)
+ cnt++;
+
+ return cnt;
+}
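bnx2x_vf_filters_contain counts filters whose flags contain all bits of the requested mask; (flags & mask) == mask is an all-bits test, which matters once combined masks such as MAC|VLAN are passed in. A standalone illustration of how it differs from an any-bit test:

#include <assert.h>
#include <stdint.h>

#define F_MAC  0x01
#define F_VLAN 0x02

/* all-bits test, as in bnx2x_vf_filters_contain */
static int contains_all(uint32_t flags, uint32_t mask)
{
	return (flags & mask) == mask;
}

int main(void)
{
	assert(contains_all(F_MAC | F_VLAN, F_MAC | F_VLAN));
	/* a plain (flags & mask) any-bit test would wrongly pass here */
	assert(!contains_all(F_MAC, F_MAC | F_VLAN));
	return 0;
}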
+
static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
struct vfpf_q_mac_vlan_filter *filter)
{
@@ -1629,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
@@ -1639,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* check for any mac/vlan changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
- /* build mac list */
struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+ /* build vlan-mac list */
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_MAC_FILTER);
+ VFPF_VLAN_MAC_FILTER);
if (rc)
goto op_err;
if (fl) {
- /* set mac list */
+ /* set vlan-mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
@@ -1657,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
}
- /* build vlan list */
+ /* build mac list */
fl = NULL;
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
- VFPF_VLAN_FILTER);
+ VFPF_MAC_FILTER);
if (rc)
goto op_err;
if (fl) {
- /* set vlan list */
+ /* set mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid,
false);
if (rc)
goto op_err;
}
+
}
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1687,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
}
- /* A packet arriving the vf's mac should be accepted
- * with any vlan, unless a vlan has already been
- * configured.
+ /* any_vlan is not configured if the HV is forcing a VLAN.
+ * It is configured if either
+ * 1. the VF does not support vlan filtering, or
+ * 2. the VF supports vlan filtering and explicitly requested it
+ */
- if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+ (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+ msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
@@ -1727,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
* since queue was not set up.
*/
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
- /* once a mac was set by ndo can only accept a single mac... */
- if (filters->n_mac_vlan_filters > 1) {
- BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
- vf->abs_vfid);
- rc = -EPERM;
- goto response;
+ struct vfpf_q_mac_vlan_filter *filter = NULL;
+ int i;
+
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (!(filters->filters[i].flags &
+ VFPF_Q_FILTER_DEST_MAC_VALID))
+ continue;
+
+ /* once a mac was set by the ndo, the VF can only
+ * accept a single mac...
+ */
+ if (filter) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+ vf->abs_vfid,
+ filters->n_mac_vlan_filters);
+ rc = -EPERM;
+ goto response;
+ }
+
+ filter = &filters->filters[i];
}
/* ...and only the mac set by the ndo */
- if (filters->n_mac_vlan_filters == 1 &&
- !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+ if (filter &&
+ !ether_addr_equal(filter->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid);
@@ -1759,17 +1882,14 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
/* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
- int i;
-
/* search for vlan filters */
- for (i = 0; i < filters->n_mac_vlan_filters; i++) {
- if (filters->filters[i].flags &
- VFPF_Q_FILTER_VLAN_TAG_VALID) {
- BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
- vf->abs_vfid);
- rc = -EPERM;
- goto response;
- }
+
+ if (bnx2x_vf_filters_contain(filters,
+ VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ rc = -EPERM;
+ goto response;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index b86479fc0d2f..64f2b52c5829 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -1,16 +1,22 @@
-/* bnx2x_vfpf.h: Broadcom Everest network driver.
+/* bnx2x_vfpf.h: QLogic Everest network driver.
*
* Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
*
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
* agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
*
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is
+ * not derived from this software. The special exception does not apply to any
+ * modifications of the software.
*
* Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Ariel Elior <ariel.elior@qlogic.com>
@@ -64,6 +70,8 @@ struct hw_sb_info {
#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020
+
#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
#define BULLETIN_CONTENT_LEGACY_SIZE (32)
#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */
@@ -127,6 +135,7 @@ struct vfpf_acquire_tlv {
u8 fp_hsi_ver;
u8 caps;
#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER (1 << 1)
} vfdev_info;
struct vf_pf_resc_request resc_request;
@@ -168,10 +177,12 @@ struct pfvf_acquire_resp_tlv {
struct pf_vf_pfdev_info {
u32 chip_num;
u32 pf_cap;
-#define PFVF_CAP_RSS 0x00000001
-#define PFVF_CAP_DHC 0x00000002
-#define PFVF_CAP_TPA 0x00000004
-#define PFVF_CAP_TPA_UPDATE 0x00000008
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
+#define PFVF_CAP_VLAN_FILTER 0x00000010
+
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -288,7 +299,7 @@ struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
-#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+#define VFPF_Q_FILTER_SET 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
new file mode 100644
index 000000000000..97e78e217928
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_BNXT) += bnxt_en.o
+
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
new file mode 100644
index 000000000000..6c2e0c622831
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -0,0 +1,5728 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/cpu_rmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#define BNXT_TX_TIMEOUT (5 * HZ)
+
+static const char version[] =
+ "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+#define BNXT_TX_PUSH_THRESH 92
+
+enum board_idx {
+ BCM57302,
+ BCM57304,
+ BCM57404,
+ BCM57406,
+ BCM57304_VF,
+ BCM57404_VF,
+};
+
+/* indexed by enum above */
+static const struct {
+ char *name;
+} board_info[] = {
+ { "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
+ { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+};
+
+static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+ { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+ { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+#ifdef CONFIG_BNXT_SRIOV
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+#endif
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+static const u16 bnxt_vf_req_snif[] = {
+ HWRM_FUNC_CFG,
+ HWRM_PORT_PHY_QCFG,
+ HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+ return (idx == BCM57304_VF || idx == BCM57404_VF);
+}
+
+#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
+
+#define BNXT_CP_DB_REARM(db, raw_cons) \
+ writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons) \
+ writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db) \
+ writel(DB_CP_IRQ_DIS_FLAGS, db)
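These helpers compose a doorbell value by OR-ing key/flag bits with the masked ring index before a single 32-bit write. A sketch of the composition, with illustrative bit positions rather than the real BCM573xx encoding:

#include <stdint.h>
#include <stdio.h>

/* illustrative bit layout only -- not the hardware's encoding */
#define DB_KEY_CP    (1u << 24)
#define DB_IDX_VALID (1u << 26)
#define DB_IRQ_DIS   (1u << 27)
#define RING_SIZE    1024u
#define RING_CMP(x)  ((x) & (RING_SIZE - 1))

int main(void)
{
	uint32_t raw_cons = 5000; /* monotonically increasing index */
	uint32_t db = DB_KEY_CP | DB_IDX_VALID | RING_CMP(raw_cons);

	printf("doorbell value: 0x%08x (ring idx %u)\n",
	       db, RING_CMP(raw_cons));
	return 0;
}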
+
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ barrier();
+
+ return bp->tx_ring_size -
+ ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
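bnxt_tx_avail computes free descriptors with masked modular arithmetic, which stays correct when the 16-bit producer index wraps past the consumer. A quick userspace check of the formula, assuming a power-of-two ring of 256 entries:

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 256u
#define RING_MASK (RING_SIZE - 1)

/* same shape as bnxt_tx_avail: size minus in-flight descriptors */
static uint32_t tx_avail(uint16_t prod, uint16_t cons)
{
	return RING_SIZE - (((uint32_t)prod - cons) & RING_MASK);
}

int main(void)
{
	assert(tx_avail(0, 0) == RING_SIZE);          /* empty ring */
	assert(tx_avail(10, 0) == RING_SIZE - 10);    /* 10 in flight */
	assert(tx_avail(5, 65531) == RING_SIZE - 10); /* prod wrapped */
	return 0;
}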
+
+static const u16 bnxt_lhint_arr[] = {
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+ TX_BD_FLAGS_LHINT_512_TO_1023,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_1024_TO_2047,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+ TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
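The table above is indexed by length >> 9 in the transmit path, i.e. 512-byte buckets, with everything from 2048 bytes up sharing one hint. A sketch of the bucketing:

#include <stdio.h>

/* bucket 0 covers 0..511 bytes, bucket 1 covers 512..1023, and so on */
static unsigned int lhint_bucket(unsigned int length)
{
	return length >> 9;
}

int main(void)
{
	unsigned int lens[] = { 60, 511, 512, 1024, 2048, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4u -> bucket %u\n", lens[i], lhint_bucket(lens[i]));
	return 0;
}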
+
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct tx_bd *txbd;
+ struct tx_bd_ext *txbd1;
+ struct netdev_queue *txq;
+ int i;
+ dma_addr_t mapping;
+ unsigned int length, pad = 0;
+ u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+ u16 prod, last_frag;
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+
+ i = skb_get_queue_mapping(skb);
+ if (unlikely(i >= bp->tx_nr_rings)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(dev, i);
+ prod = txr->tx_prod;
+
+ free_size = bnxt_tx_avail(bp, txr);
+ if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+ netif_tx_stop_queue(txq);
+ return NETDEV_TX_BUSY;
+ }
+
+ length = skb->len;
+ len = skb_headlen(skb);
+ last_frag = skb_shinfo(skb)->nr_frags;
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd->tx_bd_opaque = prod;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = skb;
+ tx_buf->nr_frags = last_frag;
+
+ vlan_tag_flags = 0;
+ cfa_action = 0;
+ if (skb_vlan_tag_present(skb)) {
+ vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+ skb_vlan_tag_get(skb);
+ /* Currently supports 802.1Q and 802.1AD vlan offloads;
+ * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
+ */
+ if (skb->vlan_proto == htons(ETH_P_8021Q))
+ vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+ }
+
+ if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+ struct tx_push_bd *push = txr->tx_push;
+ struct tx_bd *tx_push = &push->txbd1;
+ struct tx_bd_ext *tx_push1 = &push->txbd2;
+ void *pdata = tx_push1 + 1;
+ int j;
+
+ /* Set COAL_NOW to be ready quickly for the next push */
+ tx_push->tx_bd_len_flags_type =
+ cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+ TX_BD_TYPE_LONG_TX_BD |
+ TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+ TX_BD_FLAGS_COAL_NOW |
+ TX_BD_FLAGS_PACKET_END |
+ (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_push1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ else
+ tx_push1->tx_bd_hsize_lflags = 0;
+
+ tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+ skb_copy_from_linear_data(skb, pdata, len);
+ pdata += len;
+ for (j = 0; j < last_frag; j++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ void *fptr;
+
+ fptr = skb_frag_address_safe(frag);
+ if (!fptr)
+ goto normal_tx;
+
+ memcpy(pdata, fptr, skb_frag_size(frag));
+ pdata += skb_frag_size(frag);
+ }
+
+ memcpy(txbd, tx_push, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ memcpy(txbd, tx_push1, sizeof(*txbd));
+ prod = NEXT_TX(prod);
+ push->doorbell =
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+ txr->tx_prod = prod;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ __iowrite64_copy(txr->tx_doorbell, push,
+ (length + sizeof(*push) + 8) / 8);
+
+ tx_buf->is_push = 1;
+
+ goto tx_done;
+ }
+
+normal_tx:
+ if (length < BNXT_MIN_PKT_SIZE) {
+ pad = BNXT_MIN_PKT_SIZE - length;
+ if (skb_pad(skb, pad)) {
+ /* SKB already freed. */
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+ length = BNXT_MIN_PKT_SIZE;
+ }
+
+ mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+ dev_kfree_skb_any(skb);
+ tx_buf->skb = NULL;
+ return NETDEV_TX_OK;
+ }
+
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+ ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ prod = NEXT_TX(prod);
+ txbd1 = (struct tx_bd_ext *)
+ &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ txbd1->tx_bd_hsize_lflags = 0;
+ if (skb_is_gso(skb)) {
+ u32 hdr_len;
+
+ if (skb->encapsulation)
+ hdr_len = skb_inner_network_offset(skb) +
+ skb_inner_network_header_len(skb) +
+ inner_tcp_hdrlen(skb);
+ else
+ hdr_len = skb_transport_offset(skb) +
+ tcp_hdrlen(skb);
+
+ txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+ TX_BD_FLAGS_T_IPID |
+ (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+ length = skb_shinfo(skb)->gso_size;
+ txbd1->tx_bd_mss = cpu_to_le32(length);
+ length += hdr_len;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ txbd1->tx_bd_hsize_lflags =
+ cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+ txbd1->tx_bd_mss = 0;
+ }
+
+ length >>= 9;
+ flags |= bnxt_lhint_arr[length];
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+ txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+ txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+ for (i = 0; i < last_frag; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ prod = NEXT_TX(prod);
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ len = skb_frag_size(frag);
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+ goto tx_dma_error;
+
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ flags = len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type =
+ cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ prod = NEXT_TX(prod);
+ txr->tx_prod = prod;
+
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+ writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+ mmiowb();
+
+ if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+ netif_tx_stop_queue(txq);
+
+ /* netif_tx_stop_queue() must be done before checking
+ * tx index in bnxt_tx_avail() below, because in
+ * bnxt_tx_int(), we update tx index before checking for
+ * netif_tx_queue_stopped().
+ */
+ smp_mb();
+ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+ netif_tx_wake_queue(txq);
+ }
+ return NETDEV_TX_OK;
+
+tx_dma_error:
+ last_frag = i;
+
+ /* start back at beginning and unmap skb */
+ prod = txr->tx_prod;
+ tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->skb = NULL;
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ prod = NEXT_TX(prod);
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < last_frag; i++) {
+ prod = NEXT_TX(prod);
+ tx_buf = &txr->tx_buf_ring[prod];
+ dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ PCI_DMA_TODEVICE);
+ }
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ int index = bnapi->index;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+ u16 cons = txr->tx_cons;
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+ unsigned int tx_bytes = 0;
+
+ for (i = 0; i < nr_pkts; i++) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct sk_buff *skb;
+ int j, last;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = NEXT_TX(cons);
+ skb = tx_buf->skb;
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ tx_buf->is_push = 0;
+ goto next_tx_int;
+ }
+
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ last = tx_buf->nr_frags;
+
+ for (j = 0; j < last; j++) {
+ cons = NEXT_TX(cons);
+ tx_buf = &txr->tx_buf_ring[cons];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[j]),
+ PCI_DMA_TODEVICE);
+ }
+
+next_tx_int:
+ cons = NEXT_TX(cons);
+
+ tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+ }
+
+ netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+ txr->tx_cons = cons;
+
+ /* Need to make the tx_cons update visible to bnxt_start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that bnxt_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq)) &&
+ (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ txr->dev_state != BNXT_DEV_STATE_CLOSING)
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ u8 *data;
+ struct pci_dev *pdev = bp->pdev;
+
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
+ return NULL;
+
+ *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+ if (dma_mapping_error(&pdev->dev, *mapping)) {
+ kfree(data);
+ data = NULL;
+ }
+ return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ u8 *data;
+ dma_addr_t mapping;
+
+ data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
+static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
+ u8 *data)
+{
+ u16 prod = rxr->rx_prod;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *cons_bd, *prod_bd;
+
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ prod_rx_buf->data = data;
+
+ dma_unmap_addr_set(prod_rx_buf, mapping,
+ dma_unmap_addr(cons_rx_buf, mapping));
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+ prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
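bnxt_find_next_agg_idx searches for a free aggregation slot at or after idx and wraps to the start of the bitmap if the tail is fully occupied. A toy 64-bit userspace model of the same wrap-around search (not the kernel's find_next_zero_bit):

#include <assert.h>
#include <stdint.h>

#define MAP_BITS 64

static unsigned int next_zero_bit(uint64_t map, unsigned int start)
{
	unsigned int i;

	for (i = start; i < MAP_BITS; i++)
		if (!(map >> i & 1))
			return i;
	return MAP_BITS; /* none found */
}

static unsigned int find_next_agg_idx(uint64_t map, unsigned int idx)
{
	unsigned int next = next_zero_bit(map, idx);

	if (next >= MAP_BITS)
		next = next_zero_bit(map, 0); /* wrap around */
	return next;
}

int main(void)
{
	/* bits 62..63 set: searching from 62 must wrap to bit 0 */
	uint64_t map = 3ULL << 62;

	assert(find_next_agg_idx(map, 62) == 0);
	assert(find_next_agg_idx(map, 10) == 10);
	return 0;
}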
+
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct rx_bd *rxbd =
+ &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+ struct pci_dev *pdev = bp->pdev;
+ struct page *page;
+ dma_addr_t mapping;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+
+ mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+ rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+ rx_agg_buf->page = page;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ struct page *page;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+ /* It is possible for sw_prod to be equal to cons, so
+ * set cons_rx_buf->page to NULL first.
+ */
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+ prod_rx_buf->page = page;
+
+ prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+ prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+ prod_bd->rx_bd_opaque = sw_prod;
+
+ prod = NEXT_RX_AGG(prod);
+ sw_prod = NEXT_RX_AGG(sw_prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr, u16 cons,
+ u16 prod, u8 *data, dma_addr_t dma_addr,
+ unsigned int len)
+{
+ int err;
+ struct sk_buff *skb;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+ if (!skb) {
+ kfree(data);
+ return NULL;
+ }
+
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ return skb;
+}
+
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct sk_buff *skb, u16 cp_cons,
+ u32 agg_bufs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u16 prod = rxr->rx_agg_prod;
+ u32 i;
+
+ for (i = 0; i < agg_bufs; i++) {
+ u16 cons, frag_len;
+ struct rx_agg_cmp *agg;
+ struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+ struct page *page;
+ dma_addr_t mapping;
+
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = agg->rx_agg_cmp_opaque;
+ frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+ RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+ cons_rx_buf = &rxr->rx_agg_ring[cons];
+ skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+ __clear_bit(cons, rxr->rx_agg_bmap);
+
+ /* It is possible for bnxt_alloc_rx_page() to allocate
+ * a sw_prod index that equals the cons index, so we
+ * need to clear the cons entry now.
+ */
+ mapping = dma_unmap_addr(cons_rx_buf, mapping);
+ page = cons_rx_buf->page;
+ cons_rx_buf->page = NULL;
+
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ struct skb_shared_info *shinfo;
+ unsigned int nr_frags;
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = --shinfo->nr_frags;
+ __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+ dev_kfree_skb(skb);
+
+ cons_rx_buf->page = page;
+
+ /* Update prod since possibly some pages have been
+ * allocated already.
+ */
+ rxr->rx_agg_prod = prod;
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+ return NULL;
+ }
+
+ dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ skb->data_len += frag_len;
+ skb->len += frag_len;
+ skb->truesize += PAGE_SIZE;
+
+ prod = NEXT_RX_AGG(prod);
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+ rxr->rx_agg_prod = prod;
+ return skb;
+}
+
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u8 agg_bufs, u32 *raw_cons)
+{
+ u16 last;
+ struct rx_agg_cmp *agg;
+
+ *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+ last = RING_CMP(*raw_cons);
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
+ return RX_AGG_CMP_VALID(agg, *raw_cons);
+}
+
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ struct bnxt *bp = bnapi->bp;
+ struct pci_dev *pdev = bp->pdev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&bnapi->napi, len);
+ if (!skb)
+ return NULL;
+
+ dma_sync_single_for_cpu(&pdev->dev, mapping,
+ bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+
+ memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+
+ dma_sync_single_for_device(&pdev->dev, mapping,
+ bp->rx_copy_thresh,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, len);
+ return skb;
+}
+
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ u8 agg_id = TPA_START_AGG_ID(tpa_start);
+ u16 cons, prod;
+ struct bnxt_tpa_info *tpa_info;
+ struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *prod_bd;
+ dma_addr_t mapping;
+
+ cons = tpa_start->rx_tpa_start_cmp_opaque;
+ prod = rxr->rx_prod;
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+ tpa_info = &rxr->rx_tpa[agg_id];
+
+ prod_rx_buf->data = tpa_info->data;
+
+ mapping = tpa_info->mapping;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+ prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+ prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ tpa_info->data = cons_rx_buf->data;
+ cons_rx_buf->data = NULL;
+ tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+
+ tpa_info->len =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+ RX_TPA_START_CMP_LEN_SHIFT;
+ if (likely(TPA_START_HASH_VALID(tpa_start))) {
+ u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+ tpa_info->hash_type = PKT_HASH_TYPE_L4;
+ tpa_info->gso_type = SKB_GSO_TCPV4;
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type == 3)
+ tpa_info->gso_type = SKB_GSO_TCPV6;
+ tpa_info->rss_hash =
+ le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+ } else {
+ tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+ tpa_info->gso_type = 0;
+ if (netif_msg_rx_err(bp))
+ netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ }
+ tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+ tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+
+ rxr->rx_prod = NEXT_RX(prod);
+ cons = NEXT_RX(cons);
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+ bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+ rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+ cons_rx_buf->data = NULL;
+}
+
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u16 cp_cons, u32 agg_bufs)
+{
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ struct tcphdr *th;
+ int payload_off, tcp_opt_len = 0;
+ int len, nw_off;
+
+ NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (TPA_END_GRO_TS(tpa_end))
+ tcp_opt_len = 12;
+
+ if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+ struct iphdr *iph;
+
+ nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ip_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+ } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+ struct ipv6hdr *iph;
+
+ nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+ ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iph = ipv6_hdr(skb);
+ skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+ } else {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ tcp_gro_complete(skb);
+
+ if (nw_off) { /* tunnel */
+ struct udphdr *uh = NULL;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |=
+ SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ }
+#endif
+ return skb;
+}
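bnxt_gro_skb reseeds th->check from the pseudo-header sum so tcp_gro_complete() can finish the checksum fixup. A simplified host-order model of the IPv4 pseudo-header sum (illustrative only; the kernel's csum helpers handle byte order differently):

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit ones-complement accumulator to 16 bits */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* IPv4 pseudo-header sum: saddr + daddr + proto + TCP length */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t len)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;
	sum += len;
	return csum_fold(sum);
}

int main(void)
{
	/* 192.168.0.1 -> 192.168.0.2, TCP (6), 100 bytes of TCP data */
	uint16_t sum = pseudo_hdr_sum(0xc0a80001, 0xc0a80002, 6, 100);

	printf("pseudo-header sum: 0x%04x\n", sum);
	return 0;
}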
+
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+ struct bnxt_napi *bnapi,
+ u32 *raw_cons,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ u8 agg_id = TPA_END_AGG_ID(tpa_end);
+ u8 *data, agg_bufs;
+ u16 cp_cons = RING_CMP(*raw_cons);
+ unsigned int len;
+ struct bnxt_tpa_info *tpa_info;
+ dma_addr_t mapping;
+ struct sk_buff *skb;
+
+ tpa_info = &rxr->rx_tpa[agg_id];
+ data = tpa_info->data;
+ prefetch(data);
+ len = tpa_info->len;
+ mapping = tpa_info->mapping;
+
+ agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *agg_event = true;
+ cp_cons = NEXT_CMP(cp_cons);
+ }
+
+ if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+ agg_bufs, (int)MAX_SKB_FRAGS);
+ return NULL;
+ }
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, mapping);
+ if (!skb) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ } else {
+ u8 *new_data;
+ dma_addr_t new_mapping;
+
+ new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ if (!new_data) {
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+
+ tpa_info->data = new_data;
+ tpa_info->mapping = new_mapping;
+
+ skb = build_skb(data, 0);
+ dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ if (!skb) {
+ kfree(data);
+ bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ return NULL;
+ }
+ skb_reserve(skb, BNXT_RX_OFFSET);
+ skb_put(skb, len);
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ /* Page reuse already handled by bnxt_rx_pages(). */
+ return NULL;
+ }
+ }
+ skb->protocol = eth_type_trans(skb, bp->dev);
+
+ if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ netdev_features_t features = skb->dev->features;
+ u16 vlan_proto = tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD)) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ tpa_info->metadata &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+ }
+
+ skb_checksum_none_assert(skb);
+ if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level =
+ (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+ }
+
+ if (TPA_END_GRO(tpa_end))
+ skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+
+ return skb;
+}
+
+/* returns the following:
+ * 1 - 1 packet successfully received
+ * 0 - successful TPA_START, packet not completed yet
+ * -EBUSY - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO - packet aborted due to hw error indicated in BD
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ bool *agg_event)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
+ struct rx_cmp *rxcmp;
+ struct rx_cmp_ext *rxcmp1;
+ u32 tmp_raw_cons = *raw_cons;
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct bnxt_sw_rx_bd *rx_buf;
+ unsigned int len;
+ u8 *data, agg_bufs, cmp_type;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+ int rc = 0;
+
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+ return -EBUSY;
+
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ prod = rxr->rx_prod;
+
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+ bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+ goto next_rx_no_prod;
+
+ } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+ skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+ (struct rx_tpa_end_cmp *)rxcmp,
+ (struct rx_tpa_end_cmp_ext *)rxcmp1,
+ agg_event);
+
+ if (unlikely(IS_ERR(skb)))
+ return -EBUSY;
+
+ rc = -ENOMEM;
+ if (likely(skb)) {
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+ }
+ goto next_rx_no_prod;
+ }
+
+ cons = rxcmp->rx_cmp_opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ prefetch(data);
+
+ agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+ RX_CMP_AGG_BUFS_SHIFT;
+
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+ return -EBUSY;
+
+ cp_cons = NEXT_CMP(cp_cons);
+ *agg_event = true;
+ }
+
+ rx_buf->data = NULL;
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+ rc = -EIO;
+ goto next_rx;
+ }
+
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+ if (len <= bp->rx_copy_thresh) {
+ skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (agg_bufs) {
+ skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+
+ if (RX_CMP_HASH_VALID(rxcmp)) {
+ u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+ if (hash_type != 1 && hash_type != 3)
+ type = PKT_HASH_TYPE_L3;
+ skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
+ netdev_features_t features = skb->dev->features;
+ u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ vlan_proto == ETH_P_8021Q) ||
+ ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_proto == ETH_P_8021AD))
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+ meta_data &
+ RX_CMP_FLAGS2_METADATA_VID_MASK);
+ }
+
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ } else {
+ if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
+ cpr->rx_l4_csum_errors++;
+ }
+
+ skb_record_rx_queue(skb, bnapi->index);
+ skb_mark_napi_id(skb, &bnapi->napi);
+ if (bnxt_busy_polling(bnapi))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&bnapi->napi, skb);
+ rc = 1;
+
+next_rx:
+ rxr->rx_prod = NEXT_RX(prod);
+
+next_rx_no_prod:
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
+}
+
+static int bnxt_async_event_process(struct bnxt *bp,
+ struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+
+ /* TODO CHIMP_FW: Define event id's for link change, error etc */
+ switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+ default:
+ netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
+ event_id);
+ break;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+ u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+ struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+ struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+ (struct hwrm_fwd_req_cmpl *)txcmp;
+
+ switch (cmpl_type) {
+ case CMPL_BASE_TYPE_HWRM_DONE:
+ seq_id = le16_to_cpu(h_cmpl->sequence_id);
+ if (seq_id == bp->hwrm_intr_seq_id)
+ bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+ else
+ netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+ vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+ if ((vf_id < bp->pf.first_vf_id) ||
+ (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+ netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+ vf_id);
+ return -EINVAL;
+ }
+
+ set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+ set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ break;
+
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ bnxt_async_event_process(bp,
+ (struct hwrm_async_event_cmpl *)txcmp);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
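+/* MSI-X interrupt handler: each completion ring has its own vector, so
+ * simply prefetch the next completion entry and hand all work to NAPI.
+ */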
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ u32 raw_cons = cpr->cp_raw_cons;
+ u16 cons = RING_CMP(raw_cons);
+ struct tx_cmp *txcmp;
+
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ return TX_CMP_VALID(txcmp, raw_cons);
+}
+
+#define CAG_LEGACY_INT_STATUS 0x2014
+
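+/* Legacy INTA handler.  The interrupt line may be shared, so when no work
+ * is pending, read the controller's interrupt status register to decide
+ * whether this interrupt is ours before scheduling NAPI.
+ */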
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+ struct bnxt_napi *bnapi = dev_instance;
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 cons = RING_CMP(cpr->cp_raw_cons);
+ u32 int_status;
+
+ prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+ if (!bnxt_has_work(bp, cpr)) {
+ int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
+ /* Not our interrupt; return IRQ_NONE if the status bit is clear */
+ if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+ return IRQ_NONE;
+ }
+
+ /* disable ring IRQ */
+ BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+ /* Return here if interrupt is shared and is disabled. */
+ if (unlikely(atomic_read(&bp->intr_sem) != 0))
+ return IRQ_HANDLED;
+
+ napi_schedule(&bnapi->napi);
+ return IRQ_HANDLED;
+}
+
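+/* Process up to @budget completions from one completion ring, dispatching
+ * TX completions, RX packets, and HWRM/async events, then ACK the ring and
+ * ring the RX/aggregation doorbells for any buffers produced.
+ */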
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 raw_cons = cpr->cp_raw_cons;
+ u32 cons;
+ int tx_pkts = 0;
+ int rx_pkts = 0;
+ bool rx_event = false;
+ bool agg_event = false;
+ struct tx_cmp *txcmp;
+
+ while (1) {
+ int rc;
+
+ cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons))
+ break;
+
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+ tx_pkts++;
+ /* Consume the full budget so NAPI keeps polling instead of completing. */
+ if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ rx_pkts = budget;
+ } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { /* RX cmpl types */
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ if (likely(rc >= 0))
+ rx_pkts += rc;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ rx_event = true;
+ } else if (unlikely((TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_DONE) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+ (TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ bnxt_hwrm_handler(bp, txcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts == budget)
+ break;
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ /* ACK completion ring before freeing tx ring and producing new
+ * buffers in rx/agg rings to prevent overflowing the completion
+ * ring.
+ */
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ if (tx_pkts)
+ bnxt_tx_int(bp, bnapi, tx_pkts);
+
+ if (rx_event) {
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+
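+ /* Publish the new RX (and aggregation) producer indices.  Each
+ * doorbell write is issued twice, presumably to guarantee the
+ * update is posted to the hardware (the reason is not documented
+ * here).
+ */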
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ if (agg_event) {
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ }
+ }
+ return rx_pkts;
+}
+
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int work_done = 0;
+
+ if (!bnxt_lock_napi(bnapi))
+ return budget;
+
+ while (1) {
+ work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+ if (work_done >= budget)
+ break;
+
+ if (!bnxt_has_work(bp, cpr)) {
+ napi_complete(napi);
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ break;
+ }
+ }
+ mmiowb();
+ bnxt_unlock_napi(bnapi);
+ return work_done;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
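+/* Busy-poll entry point: poll a small fixed budget of completions directly
+ * from process context for low latency, without scheduling NAPI.
+ */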
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int rx_work, budget = 4;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ return LL_FLUSH_FAILED;
+
+ if (!bnxt_lock_poll(bnapi))
+ return LL_FLUSH_BUSY;
+
+ rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+ bnxt_unlock_poll(bnapi);
+ return rx_work;
+}
+#endif
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i, max_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ for (j = 0; j < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+ struct sk_buff *skb = tx_buf->skb;
+ int k, last;
+
+ if (!skb) {
+ j++;
+ continue;
+ }
+
+ tx_buf->skb = NULL;
+
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ j += 2;
+ continue;
+ }
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ last = tx_buf->nr_frags;
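+ /* The first BD and its long BD extension occupy two slots */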
+ j += 2;
+ for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+ tx_buf = &txr->tx_buf_ring[j];
+ dma_unmap_page(
+ &pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
+ }
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ }
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+ int i, max_idx, max_agg_idx;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+ max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ int j;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ if (rxr->rx_tpa) {
+ for (j = 0; j < MAX_TPA; j++) {
+ struct bnxt_tpa_info *tpa_info =
+ &rxr->rx_tpa[j];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(
+ &pdev->dev,
+ dma_unmap_addr(tpa_info, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ tpa_info->data = NULL;
+
+ kfree(data);
+ }
+ }
+
+ for (j = 0; j < max_idx; j++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+ u8 *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ bp->rx_buf_use_size,
+ PCI_DMA_FROMDEVICE);
+
+ rx_buf->data = NULL;
+
+ kfree(data);
+ }
+
+ for (j = 0; j < max_agg_idx; j++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+ &rxr->rx_agg_ring[j];
+ struct page *page = rx_agg_buf->page;
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(rx_agg_buf, mapping),
+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+ rx_agg_buf->page = NULL;
+ __clear_bit(j, rxr->rx_agg_bmap);
+
+ __free_page(page);
+ }
+ }
+}
+
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+ bnxt_free_tx_skbs(bp);
+ bnxt_free_rx_skbs(bp);
+}
+
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ if (!ring->pg_arr[i])
+ continue;
+
+ dma_free_coherent(&pdev->dev, ring->page_size,
+ ring->pg_arr[i], ring->dma_arr[i]);
+
+ ring->pg_arr[i] = NULL;
+ }
+ if (ring->pg_tbl) {
+ dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+ ring->pg_tbl, ring->pg_tbl_map);
+ ring->pg_tbl = NULL;
+ }
+ if (ring->vmem_size && *ring->vmem) {
+ vfree(*ring->vmem);
+ *ring->vmem = NULL;
+ }
+}
+
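+/* Allocate the DMA-coherent descriptor pages for a ring and, for multi-page
+ * rings, the page table the hardware uses to find them.  The optional vmem
+ * area holds the software per-descriptor state and is vzalloc'ed.
+ */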
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (ring->nr_pages > 1) {
+ ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+ ring->nr_pages * 8,
+ &ring->pg_tbl_map,
+ GFP_KERNEL);
+ if (!ring->pg_tbl)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ring->nr_pages; i++) {
+ ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ ring->page_size,
+ &ring->dma_arr[i],
+ GFP_KERNEL);
+ if (!ring->pg_arr[i])
+ return -ENOMEM;
+
+ if (ring->nr_pages > 1)
+ ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+ }
+
+ if (ring->vmem_size) {
+ *ring->vmem = vzalloc(ring->vmem_size);
+ if (!(*ring->vmem))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, ring);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+ int i, rc, agg_rings = 0, tpa_rings = 0;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ agg_rings = 1;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ tpa_rings = 1;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (agg_rings) {
+ u16 mem_size;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ if (tpa_rings) {
+ rxr->rx_tpa = kcalloc(MAX_TPA,
+ sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+ }
+ }
+ }
+ return 0;
+}
+
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+ int i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+
+ if (txr->tx_push) {
+ dma_free_coherent(&pdev->dev, bp->tx_push_size,
+ txr->tx_push, txr->tx_push_mapping);
+ txr->tx_push = NULL;
+ }
+
+ ring = &txr->tx_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+ int i, j, rc;
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->tx_push_size = 0;
+ if (bp->tx_push_thresh) {
+ int push_size;
+
+ push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+ bp->tx_push_thresh);
+
+ if (push_size > 128) {
+ push_size = 0;
+ bp->tx_push_thresh = 0;
+ }
+
+ bp->tx_push_size = push_size;
+ }
+
+ for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+
+ if (bp->tx_push_size) {
+ struct tx_bd *txbd;
+ dma_addr_t mapping;
+
+ /* One pre-allocated DMA buffer to back up the
+ * TX push operation
+ */
+ txr->tx_push = dma_alloc_coherent(&pdev->dev,
+ bp->tx_push_size,
+ &txr->tx_push_mapping,
+ GFP_KERNEL);
+
+ if (!txr->tx_push)
+ return -ENOMEM;
+
+ txbd = &txr->tx_push->txbd1;
+
+ mapping = txr->tx_push_mapping +
+ sizeof(struct tx_push_bd);
+ txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+ memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+ }
+ ring->queue_id = bp->q_info[j].queue_id;
+ if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+}
+
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ bnxt_free_ring(bp, ring);
+ }
+}
+
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+ int i, rc;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
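+/* Point each logical ring (completion, RX, RX aggregation, TX) at its
+ * descriptor page array, DMA mapping array, and software ring memory.
+ */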
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring_struct *ring;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ ring = &cpr->cp_ring_struct;
+ ring->nr_pages = bp->cp_nr_pages;
+ ring->page_size = HW_CMPD_RING_SIZE;
+ ring->pg_arr = (void **)cpr->cp_desc_ring;
+ ring->dma_arr = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ ring->nr_pages = bp->rx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_desc_ring;
+ ring->dma_arr = rxr->rx_desc_mapping;
+ ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ ring->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->nr_pages = bp->rx_agg_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ ring->dma_arr = rxr->rx_agg_desc_mapping;
+ ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ ring->vmem = (void **)&rxr->rx_agg_ring;
+
+ txr = &bnapi->tx_ring;
+ ring = &txr->tx_ring_struct;
+ ring->nr_pages = bp->tx_nr_pages;
+ ring->page_size = HW_RXBD_RING_SIZE;
+ ring->pg_arr = (void **)txr->tx_desc_ring;
+ ring->dma_arr = txr->tx_desc_mapping;
+ ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ ring->vmem = (void **)&txr->tx_buf_ring;
+ }
+}
+
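+/* Pre-initialize every RX BD in the ring with the given type/length flags
+ * and a monotonically increasing opaque index that completions echo back.
+ */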
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+ int i;
+ u32 prod;
+ struct rx_bd **rx_buf_ring;
+
+ rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+ for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+ int j;
+ struct rx_bd *rxbd;
+
+ rxbd = rx_buf_ring[i];
+ if (!rxbd)
+ continue;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+ u32 prod, type;
+ int i;
+
+ if (!bnapi)
+ return -EINVAL;
+
+ type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ rxr = &bnapi->rx_ring;
+ ring = &rxr->rx_ring_struct;
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_prod;
+ for (i = 0; i < bp->rx_ring_size; i++) {
+ if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+ netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ ring_nr, i, bp->rx_ring_size);
+ break;
+ }
+ prod = NEXT_RX(prod);
+ }
+ rxr->rx_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ ring = &rxr->rx_agg_ring_struct;
+
+ type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnxt_init_rxbd_pages(ring, type);
+
+ prod = rxr->rx_agg_prod;
+ for (i = 0; i < bp->rx_agg_ring_size; i++) {
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+ netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+ ring_nr, i, bp->rx_agg_ring_size);
+ break;
+ }
+ prod = NEXT_RX_AGG(prod);
+ }
+ rxr->rx_agg_prod = prod;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ if (rxr->rx_tpa) {
+ u8 *data;
+ dma_addr_t mapping;
+
+ for (i = 0; i < MAX_TPA; i++) {
+ data = __bnxt_alloc_rx_data(bp, &mapping,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+ } else {
+ netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ rc = bnxt_init_one_rx_ring(bp, i);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+ u16 i;
+
+ bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+ MAX_SKB_FRAGS + 1);
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+ kfree(bp->grp_info);
+ bp->grp_info = NULL;
+}
+
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+ int i;
+
+ if (irq_re_init) {
+ bp->grp_info = kcalloc(bp->cp_nr_rings,
+ sizeof(struct bnxt_ring_grp_info),
+ GFP_KERNEL);
+ if (!bp->grp_info)
+ return -ENOMEM;
+ }
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (irq_re_init)
+ bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ return 0;
+}
+
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+ kfree(bp->vnic_info);
+ bp->vnic_info = NULL;
+ bp->nr_vnics = 0;
+}
+
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+ int num_vnics = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (bp->flags & BNXT_FLAG_RFS)
+ num_vnics += bp->rx_nr_rings;
+#endif
+
+ bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
+ GFP_KERNEL);
+ if (!bp->vnic_info)
+ return -ENOMEM;
+
+ bp->nr_vnics = num_vnics;
+ return 0;
+}
+
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+ if (bp->vnic_info[i].rss_hash_key) {
+ if (i == 0)
+ prandom_bytes(vnic->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ else
+ memcpy(vnic->rss_hash_key,
+ bp->vnic_info[0].rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+}
+
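+/* Return the number of descriptor pages needed to hold at least
+ * ring_size + 1 entries, rounded up to a power of 2.  For example,
+ * ring_size = 511 with 64 descriptors per page yields 8 pages.
+ */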
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+ int pages;
+
+ pages = ring_size / desc_per_pg;
+
+ if (!pages)
+ return 1;
+
+ pages++;
+
+ while (pages & (pages - 1))
+ pages++;
+
+ return pages;
+}
+
+static void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->dev->features & NETIF_F_LRO)
+ bp->flags |= BNXT_FLAG_LRO;
+ if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ bp->flags |= BNXT_FLAG_GRO;
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ */
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+ u32 ring_size, rx_size, rx_space;
+ u32 agg_factor = 0, agg_ring_size = 0;
+
+ /* 8 for CRC and VLAN */
+ rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+ ring_size = bp->rx_ring_size;
+ bp->rx_agg_ring_size = 0;
+ bp->rx_agg_nr_pages = 0;
+
+ if (bp->flags & BNXT_FLAG_TPA)
+ agg_factor = 4;
+
+ bp->flags &= ~BNXT_FLAG_JUMBO;
+ if (rx_space > PAGE_SIZE) {
+ u32 jumbo_factor;
+
+ bp->flags |= BNXT_FLAG_JUMBO;
+ jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+ if (jumbo_factor > agg_factor)
+ agg_factor = jumbo_factor;
+ }
+ agg_ring_size = ring_size * agg_factor;
+
+ if (agg_ring_size) {
+ bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+ RX_DESC_CNT);
+ if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+ u32 tmp = agg_ring_size;
+
+ bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+ agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+ netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+ tmp, agg_ring_size);
+ }
+ bp->rx_agg_ring_size = agg_ring_size;
+ bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
+
+ bp->rx_buf_use_size = rx_size;
+ bp->rx_buf_size = rx_space;
+
+ bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+ bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+ ring_size = bp->tx_ring_size;
+ bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+ bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
+ ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+ bp->cp_ring_size = ring_size;
+
+ bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+ if (bp->cp_nr_pages > MAX_CP_PAGES) {
+ bp->cp_nr_pages = MAX_CP_PAGES;
+ bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+ netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+ ring_size, bp->cp_ring_size);
+ }
+ bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+ bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->vnic_info)
+ return;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ kfree(vnic->fw_grp_ids);
+ vnic->fw_grp_ids = NULL;
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+ int i, rc = 0, size;
+ struct bnxt_vnic_info *vnic;
+ struct pci_dev *pdev = bp->pdev;
+ int max_rings;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+ int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ if (mem_size > 0) {
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(&pdev->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+ if (!vnic->fw_grp_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Allocate rss table and hash key */
+ vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+out:
+ return rc;
+}
+
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+
+ bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_dbg_resp_addr) {
+ dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+ bp->hwrm_dbg_resp_addr,
+ bp->hwrm_dbg_resp_dma_addr);
+
+ bp->hwrm_dbg_resp_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &bp->hwrm_cmd_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_cmd_resp_addr)
+ return -ENOMEM;
+ bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+ HWRM_DBG_REG_BUF_SIZE,
+ &bp->hwrm_dbg_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_dbg_resp_addr)
+ netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
+
+ return 0;
+}
+
+static void bnxt_free_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ if (!bp->bnapi)
+ return;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats) {
+ dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
+ cpr->hw_stats_map);
+ cpr->hw_stats = NULL;
+ }
+ }
+}
+
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
+ size = sizeof(struct ctx_hw_stats);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+ &cpr->hw_stats_map,
+ GFP_KERNEL);
+ if (!cpr->hw_stats)
+ return -ENOMEM;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+}
+
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_tx_ring_info *txr;
+
+ if (!bnapi)
+ continue;
+
+ cpr = &bnapi->cp_ring;
+ cpr->cp_raw_cons = 0;
+
+ txr = &bnapi->tx_ring;
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+
+ rxr = &bnapi->rx_ring;
+ rxr->rx_prod = 0;
+ rxr->rx_agg_prod = 0;
+ rxr->rx_sw_agg_prod = 0;
+ }
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i;
+
+ /* We are under rtnl_lock and all our NAPIs have been disabled,
+ * so it is safe to delete the hash table.
+ */
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_del(&fltr->hash);
+ kfree(fltr);
+ }
+ }
+ if (irq_reinit) {
+ kfree(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
+ }
+ bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return 0;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+ bp->ntp_fltr_count = 0;
+ /* BITS_TO_LONGS() counts longs, not bytes, so scale by sizeof(long) */
+ bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+ sizeof(long), GFP_KERNEL);
+
+ if (!bp->ntp_fltr_bmap)
+ rc = -ENOMEM;
+
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_free_vnic_attributes(bp);
+ bnxt_free_tx_rings(bp);
+ bnxt_free_rx_rings(bp);
+ bnxt_free_cp_rings(bp);
+ bnxt_free_ntp_fltrs(bp, irq_re_init);
+ if (irq_re_init) {
+ bnxt_free_stats(bp);
+ bnxt_free_ring_grps(bp);
+ bnxt_free_vnics(bp);
+ kfree(bp->bnapi);
+ bp->bnapi = NULL;
+ } else {
+ bnxt_clear_ring_indices(bp);
+ }
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+ int i, rc, size, arr_size;
+ void *bnapi;
+
+ if (irq_re_init) {
+ /* Allocate bnapi mem pointer array and mem block for
+ * all queues
+ */
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+ bp->cp_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+ bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return -ENOMEM;
+
+ bp->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+ bp->bnapi[i] = bnapi;
+ bp->bnapi[i]->index = i;
+ bp->bnapi[i]->bp = bp;
+ }
+
+ rc = bnxt_alloc_stats(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_ntp_fltrs(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_vnics(bp);
+ if (rc)
+ goto alloc_mem_err;
+ }
+
+ bnxt_init_ring_struct(bp);
+
+ rc = bnxt_alloc_rx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_tx_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_cp_rings(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ rc = bnxt_alloc_vnic_attributes(bp);
+ if (rc)
+ goto alloc_mem_err;
+ return 0;
+
+alloc_mem_err:
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
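+/* Fill in the common HWRM request header: request type, target completion
+ * ring, target function ID, and the DMA address of the response buffer.
+ */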
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+ u16 cmpl_ring, u16 target_id)
+{
+ struct hwrm_cmd_req_hdr *req = request;
+
+ req->cmpl_ring_req_type =
+ cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+ req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
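+/* Send one HWRM message and wait for the response; must be called with
+ * hwrm_cmd_lock held.  Completion is detected either through the response
+ * completion interrupt (when a valid cmpl_ring is specified) or by polling
+ * the response buffer for a non-zero length and the valid bit.
+ */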
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int i, intr_process, rc;
+ struct hwrm_cmd_req_hdr *req = msg;
+ u32 *data = msg;
+ __le32 *resp_len, *valid;
+ u16 cp_ring_id, len = 0;
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
+ req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+ memset(resp, 0, PAGE_SIZE);
+ cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+ HWRM_CMPL_RING_MASK) >>
+ HWRM_CMPL_RING_SFT;
+ intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+ /* Write request msg to hwrm channel */
+ __iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+ /* currently supports only one outstanding message */
+ if (intr_process)
+ bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
+ HWRM_SEQ_ID_MASK;
+
+ /* Ring channel doorbell */
+ writel(1, bp->bar0 + 0x100);
+
+ i = 0;
+ if (intr_process) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+ i++ < timeout) {
+ usleep_range(600, 800);
+ }
+
+ if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+ netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+ req->cmpl_ring_req_type);
+ return -1;
+ }
+ } else {
+ /* Check if response len is updated */
+ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ for (i = 0; i < timeout; i++) {
+ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+ HWRM_RESP_LEN_SFT;
+ if (len)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, *resp_len);
+ return -1;
+ }
+
+ /* Last word of resp contains valid bit */
+ valid = bp->hwrm_cmd_resp_addr + len - 4;
+ for (i = 0; i < timeout; i++) {
+ if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+ break;
+ usleep_range(600, 800);
+ }
+
+ if (i >= timeout) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+ timeout, req->cmpl_ring_req_type,
+ req->target_id_seq_id, len, *valid);
+ return -1;
+ }
+ }
+
+ rc = le16_to_cpu(resp->error_code);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ le16_to_cpu(resp->req_type),
+ le16_to_cpu(resp->seq_id), rc);
+ return rc;
+ }
+ return 0;
+}
+
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, msg, msg_len, timeout);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
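+/* Register the driver with the firmware, advertising the OS type and driver
+ * version and, on the PF, the set of HWRM commands whose VF requests should
+ * be forwarded to the driver for handling.
+ */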
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_drv_rgtr_input req = {0};
+ int i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+ req.enables =
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+ FUNC_DRV_RGTR_REQ_ENABLES_VER |
+ FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+ /* TODO: the async event forwarding bits are not yet defined; the
+ * firmware only checks that this field is non-zero to enable async
+ * event forwarding.
+ */
+ req.async_event_fwd[0] |= cpu_to_le32(1);
+ req.os_type = cpu_to_le16(1);
+ req.ver_maj = DRV_VER_MAJ;
+ req.ver_min = DRV_VER_MIN;
+ req.ver_upd = DRV_VER_UPD;
+
+ if (BNXT_PF(bp)) {
+ DECLARE_BITMAP(vf_req_snif_bmap, 256);
+ u32 *data = (u32 *)vf_req_snif_bmap;
+
+ memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
+ __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
+
+ for (i = 0; i < 8; i++) {
+ req.vf_req_fwd[i] = cpu_to_le32(*data);
+ data++;
+ }
+ req.enables |=
+ cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+ }
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+ u32 rc = 0;
+ struct hwrm_tunnel_dst_port_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+ req.tunnel_type = tunnel_type;
+
+ switch (tunnel_type) {
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+ req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+ break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+ req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+ break;
+ default:
+ break;
+ }
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+ rc);
+ return rc;
+}
+
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+ u8 tunnel_type)
+{
+ u32 rc = 0;
+ struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+ req.tunnel_type = tunnel_type;
+ req.tunnel_dst_port_val = port;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+ rc);
+ goto err_out;
+ }
+
+ if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+ bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+
+ else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+ bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+err_out:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+ req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ req.mask = cpu_to_le32(vnic->rx_mask);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+ req.ntuple_filter_id = fltr->filter_id;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#define BNXT_NTP_FLTR_FLAGS \
+ (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
+ CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
+{
+ int rc = 0;
+ struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+ req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+ req.ethertype = htons(ETH_P_IP);
+ memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+ req.ipaddr_type = 4;
+ req.ip_protocol = keys->basic.ip_proto;
+
+ req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+
+ req.src_port = keys->ports.src;
+ req.src_port_mask = cpu_to_be16(0xffff);
+ req.dst_port = keys->ports.dst;
+ req.dst_port_mask = cpu_to_be16(0xffff);
+
+ req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ fltr->filter_id = resp->ntuple_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+#endif
+
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+ u8 *mac_addr)
+{
+ u32 rc = 0;
+ struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+ req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
+ CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+ req.enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+ req.l2_addr_mask[0] = 0xff;
+ req.l2_addr_mask[1] = 0xff;
+ req.l2_addr_mask[2] = 0xff;
+ req.l2_addr_mask[3] = 0xff;
+ req.l2_addr_mask[4] = 0xff;
+ req.l2_addr_mask[5] = 0xff;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+ resp->l2_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+ u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
+ int rc = 0;
+
+ /* Any associated ntuple filters will also be cleared by firmware. */
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < num_of_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ for (j = 0; j < vnic->uc_filter_count; j++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req,
+ HWRM_CFA_L2_FILTER_FREE, -1, -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[j];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ vnic->uc_filter_count = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_tpa_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+ if (tpa_flags) {
+ u16 mss = bp->dev->mtu - 40;
+ u32 nsegs, n, segs = 0, flags;
+
+ flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+ VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+ VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+ if (tpa_flags & BNXT_FLAG_GRO)
+ flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+ req.flags = cpu_to_le32(flags);
+
+ req.enables =
+ cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+
+ /* The number of segs is in log2 units, and the first
+ * packet is not counted in these units.
+ */
+ if (mss <= PAGE_SIZE) {
+ n = PAGE_SIZE / mss;
+ nsegs = (MAX_SKB_FRAGS - 1) * n;
+ } else {
+ n = mss / PAGE_SIZE;
+ if (mss & (PAGE_SIZE - 1))
+ n++;
+ nsegs = (MAX_SKB_FRAGS - n) / n;
+ }
+
+ segs = ilog2(nsegs);
+ req.max_agg_segs = cpu_to_le16(segs);
+ req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ }
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
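+/* Configure RSS for a VNIC: program the hash type, fill the indirection
+ * table with ring group IDs in round-robin order, and pass the DMA addresses
+ * of the table and hash key to the firmware.
+ */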
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+ u32 i, j, max_rings;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_rss_cfg_input req = {0};
+
+ if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+ if (set_rss) {
+ vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
+ BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
+ BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+
+ req.hash_type = cpu_to_le32(vnic->hash_type);
+
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ max_rings = bp->rx_nr_rings;
+ else
+ max_rings = 1;
+
+ /* Fill the RSS indirection table with ring group ids */
+ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
+ if (j == max_rings)
+ j = 0;
+ vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+ }
+
+ req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr =
+ cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ }
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
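+/* Enable jumbo and IPv4/IPv6 header-data split placement modes for the
+ * VNIC, using the copy-break threshold for both placement thresholds.
+ */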
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_plcmodes_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+ req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req.enables =
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ /* thresholds not implemented in firmware yet */
+ req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+ req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+ req.rss_cos_lb_ctx_id =
+ cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, i);
+ }
+ bp->rsscos_nr_ctxs = 0;
+}
+
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+ -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+ int grp_idx = 0;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct hwrm_vnic_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+ /* Only RSS is supported for now. TBD: COS & LB */
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
+ VNIC_CFG_REQ_ENABLES_RSS_RULE);
+ req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ req.cos_rule = cpu_to_le16(0xffff);
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+ grp_idx = 0;
+ else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+ grp_idx = vnic_id - 1;
+
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+ req.lb_rule = cpu_to_le16(0xffff);
+ req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN);
+
+ if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+ req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+ u32 rc = 0;
+
+ if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+ req.vnic_id =
+ cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return rc;
+ bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ }
+ return rc;
+}
+
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+ u16 i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_free_one(bp, i);
+}
+
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
+ u16 end_grp_id)
+{
+ u32 rc = 0, i, j;
+ struct hwrm_vnic_alloc_input req = {0};
+ struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ /* map ring groups to this vnic */
+ for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+ netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+ j, (end_grp_id - start_grp_id));
+ break;
+ }
+ bp->vnic_info[vnic_id].fw_grp_ids[j] =
+ bp->grp_info[i].fw_grp_id;
+ }
+
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ if (vnic_id == 0)
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+ u16 i;
+ u32 rc = 0;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct hwrm_ring_grp_alloc_input req = {0};
+ struct hwrm_ring_grp_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
+ req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
+ req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
+ req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+ u16 i;
+ u32 rc = 0;
+ struct hwrm_ring_grp_free_input req = {0};
+
+ if (!bp->grp_info)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+ continue;
+ req.ring_group_id =
+ cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
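+/* Allocate one hardware ring of the given type through HWRM, passing either
+ * the single page address or the page table for multi-page rings, and save
+ * the firmware ring ID on success.
+ */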
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, u32 map_index,
+ u32 stats_ctx_id)
+{
+ int rc = 0, err = 0;
+ struct hwrm_ring_alloc_input req = {0};
+ struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 ring_id;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+ req.enables = 0;
+ if (ring->nr_pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+ /* Page size is in log2 units */
+ req.page_size = BNXT_PAGE_SHIFT;
+ req.page_tbl_depth = 1;
+ } else {
+ req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+ }
+ req.fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req.logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ req.cmpl_ring_id =
+ cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+ req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+ req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+ req.queue_id = cpu_to_le16(ring->queue_id);
+ break;
+ case HWRM_RING_ALLOC_RX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+ req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+ ring_type);
+ return -1;
+ }
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ err = le16_to_cpu(resp->error_code);
+ ring_id = le16_to_cpu(resp->ring_id);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || err) {
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_CMPL:
+ netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+ rc, err);
+ return -1;
+
+ case HWRM_RING_ALLOC_RX:
+ netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+ rc, err);
+ return -1;
+
+ case HWRM_RING_ALLOC_TX:
+ netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+ rc, err);
+ return -1;
+
+ default:
+ netdev_err(bp->dev, "Invalid ring\n");
+ return -1;
+ }
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+ int i, rc = 0;
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_CMPL, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_TX, i,
+ fw_stats_ctx);
+ if (rc)
+ goto err_out;
+ txr->tx_doorbell = bp->bar1 + i * 0x80;
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_RX, i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+ rxr->rx_doorbell = bp->bar1 + i * 0x80;
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_AGG,
+ bp->rx_nr_rings + i,
+ INVALID_STATS_CTX_ID);
+ if (rc)
+ goto err_out;
+
+ rxr->rx_agg_doorbell =
+ bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
+ writel(DB_KEY_RX | rxr->rx_agg_prod,
+ rxr->rx_agg_doorbell);
+ bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+err_out:
+ return rc;
+}
+
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ int rc;
+ struct hwrm_ring_free_input req = {0};
+ struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 error_code;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+ req.ring_type = ring_type;
+ req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ error_code = le16_to_cpu(resp->error_code);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || error_code) {
+ switch (ring_type) {
+ case RING_FREE_REQ_RING_TYPE_CMPL:
+ netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_RX:
+ netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+ rc);
+ return rc;
+ case RING_FREE_REQ_RING_TYPE_TX:
+ netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+ rc);
+ return rc;
+ default:
+ netdev_err(bp->dev, "Invalid ring\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+ int i, rc = 0;
+
+ if (!bp->bnapi)
+ return 0;
+
+ if (bp->tx_nr_rings) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_TX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_nr_rings) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].rx_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->rx_agg_nr_pages) {
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+ u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].agg_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ if (bp->cp_nr_rings) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(
+ bp, ring,
+ RING_FREE_REQ_RING_TYPE_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[i].cp_fw_ring_id =
+ INVALID_HW_RING_ID;
+ }
+ }
+ }
+
+ return rc;
+}
+
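+/* Program interrupt coalescing parameters (DMA aggregation buffer
+ * counts and timers) on every completion ring.  Buffer counts are
+ * clamped to the 1-63 range.
+ */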
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ u16 max_buf, max_buf_irq;
+ u16 buf_tmr, buf_tmr_irq;
+ u32 flags;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ -1, -1);
+
+ /* Each rx completion (2 records) should be DMAed immediately */
+ max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+ /* max_buf must not be zero */
+ max_buf = clamp_t(u16, max_buf, 1, 63);
+ max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
+ buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
+ buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+
+ flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+ /* RING_IDLE generates more IRQs for lower latency. Enable it only
+ * if coal_ticks is less than 25 us.
+ */
+ if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+ req.flags = cpu_to_le16(flags);
+ req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+ req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+ req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+ req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+ req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+ req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+ req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_free_input req = {0};
+
+ if (!bp->bnapi)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+ int rc = 0, i;
+ struct hwrm_stat_ctx_alloc_input req = {0};
+ struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+ req.update_period_ms = cpu_to_le32(1000);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+
+ cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+ bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
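+/* Query the capabilities of this function and cache the maximum
+ * ring, context, and filter resources for the PF or VF.
+ */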
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->fw_fid = le16_to_cpu(resp->fid);
+ pf->port_id = le16_to_cpu(resp->port_id);
+ memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ pf->max_pf_tx_rings = pf->max_tx_rings;
+ pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ pf->max_pf_rx_rings = pf->max_rx_rings;
+ pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ pf->max_vnics = le16_to_cpu(resp->max_vnics);
+ pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+ pf->max_vfs = le16_to_cpu(resp->max_vfs);
+ pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->fw_fid = le16_to_cpu(resp->fid);
+ memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!is_valid_ether_addr(vf->mac_addr))
+ random_ether_addr(vf->mac_addr);
+
+ vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+ vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+ vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+ vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+ vf->max_vnics = le16_to_cpu(resp->max_vnics);
+ vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+#endif
+ }
+
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+hwrm_func_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+ struct hwrm_func_reset_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+ req.enables = 0;
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+}
+
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_queue_qportcfg_input req = {0};
+ struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 i, *qptr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto qportcfg_exit;
+
+ if (!resp->max_configurable_queues) {
+ rc = -EINVAL;
+ goto qportcfg_exit;
+ }
+ bp->max_tc = resp->max_configurable_queues;
+ if (bp->max_tc > BNXT_MAX_QUEUE)
+ bp->max_tc = BNXT_MAX_QUEUE;
+
+ qptr = &resp->queue_id0;
+ for (i = 0; i < bp->max_tc; i++) {
+ bp->q_info[i].queue_id = *qptr++;
+ bp->q_info[i].queue_profile = *qptr++;
+ }
+
+qportcfg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
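+/* Query the firmware version and warn if the HWRM interface version
+ * does not match the one the driver was built with.
+ */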
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_ver_get_input req = {0};
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_ver_get_exit;
+
+ memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+ if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
+ req.hwrm_intf_min != resp->hwrm_intf_min ||
+ req.hwrm_intf_upd != resp->hwrm_intf_upd) {
+ netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+ resp->hwrm_intf_maj, resp->hwrm_intf_min,
+ resp->hwrm_intf_upd, req.hwrm_intf_maj,
+ req.hwrm_intf_min, req.hwrm_intf_upd);
+ netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+ }
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+ resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+ resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+hwrm_ver_get_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+ if (bp->vxlan_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ bp->vxlan_port_cnt = 0;
+ if (bp->nge_port_cnt) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+ bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+ int rc, i;
+ u32 tpa_flags = 0;
+
+ if (set_tpa)
+ tpa_flags = bp->flags & BNXT_FLAG_TPA;
+ for (i = 0; i < bp->nr_vnics; i++) {
+ rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
+ i, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++)
+ bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
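+/* Free firmware resources in roughly the reverse order of
+ * allocation: filters, RSS state, vnic contexts, TPA, vnics, rings,
+ * and ring groups.  Stats contexts and tunnel ports are freed only
+ * when the interrupts are being re-initialized.
+ */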
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+ bool irq_re_init)
+{
+ if (bp->vnic_info) {
+ bnxt_hwrm_clear_vnic_filter(bp);
+ /* clear all RSS settings before freeing the vnic ctx */
+ bnxt_hwrm_clear_vnic_rss(bp);
+ bnxt_hwrm_vnic_ctx_free(bp);
+ /* before freeing the vnic, undo the vnic tpa settings */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ bnxt_hwrm_vnic_free(bp);
+ }
+ bnxt_hwrm_ring_free(bp, close_path);
+ bnxt_hwrm_ring_grp_free(bp);
+ if (irq_re_init) {
+ bnxt_hwrm_stat_ctx_free(bp);
+ bnxt_hwrm_free_tunnel_ports(bp);
+ }
+}
+
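+/* Bring up one vnic: allocate its RSS/COS context, configure the
+ * vnic and its ring group, enable RSS, and set up header-data split
+ * when aggregation rings are in use.
+ */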
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+ int rc;
+
+ /* allocate context for vnic */
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+ bp->rsscos_nr_ctxs++;
+
+ /* configure default vnic, ring grp */
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ /* Enable RSS hashing on vnic */
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+ vnic_id, rc);
+ }
+ }
+
+vnic_setup_err:
+ return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ u16 vnic_id = i + 1;
+ u16 ring_id = i;
+
+ if (vnic_id >= bp->nr_vnics)
+ break;
+
+ bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ break;
+ }
+ rc = bnxt_setup_vnic(bp, vnic_id);
+ if (rc)
+ break;
+ }
+ return rc;
+#else
+ return 0;
+#endif
+}
+
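+/* Allocate and configure all firmware resources needed to run the
+ * chip: stats contexts, rings, ring groups, vnics, the default L2
+ * filter, the rx mask, and interrupt coalescing.  Any failure frees
+ * everything allocated so far.
+ */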
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+ int rc = 0;
+
+ if (irq_re_init) {
+ rc = bnxt_hwrm_stat_ctx_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+ rc);
+ goto err_out;
+ }
+ }
+
+ rc = bnxt_hwrm_ring_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_ring_grp_alloc(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+ goto err_out;
+ }
+
+ /* default vnic 0 */
+ rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_setup_vnic(bp, 0);
+ if (rc)
+ goto err_out;
+
+ if (bp->flags & BNXT_FLAG_RFS) {
+ rc = bnxt_alloc_rfs_vnics(bp);
+ if (rc)
+ goto err_out;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_set_tpa(bp, true);
+ if (rc)
+ goto err_out;
+ }
+
+ if (BNXT_VF(bp))
+ bnxt_update_vf_mac(bp);
+
+ /* Filter for default vnic 0 */
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ goto err_out;
+ }
+ bp->vnic_info[0].uc_filter_count = 1;
+
+ bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ bp->vnic_info[0].rx_mask |=
+ CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_set_coal(bp);
+ if (rc)
+ netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+ rc);
+
+ return 0;
+
+err_out:
+ bnxt_hwrm_resource_free(bp, 0, true);
+
+ return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_hwrm_resource_free(bp, 1, irq_re_init);
+ return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+ bnxt_init_rx_rings(bp);
+ bnxt_init_tx_rings(bp);
+ bnxt_init_ring_grps(bp, irq_re_init);
+ bnxt_init_vnics(bp);
+
+ return bnxt_init_chip(bp, irq_re_init);
+}
+
+static void bnxt_disable_int(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+ int i;
+
+ atomic_set(&bp->intr_sem, 0);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+}
+
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+ int rc;
+ struct net_device *dev = bp->dev;
+
+ rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+ if (rc)
+ return rc;
+
+ rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (bp->rx_nr_rings)
+ dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+ if (!dev->rx_cpu_rmap)
+ rc = -ENOMEM;
+#endif
+
+ return rc;
+}
+
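+/* Enable MSI-X and trim the ring counts to the number of vectors
+ * actually granted, redistributing tx rings across traffic classes
+ * if necessary.
+ */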
+static int bnxt_setup_msix(struct bnxt *bp)
+{
+ struct msix_entry *msix_ent;
+ struct net_device *dev = bp->dev;
+ int i, total_vecs, rc = 0;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ total_vecs = bp->cp_nr_rings;
+
+ msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!msix_ent)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vecs; i++) {
+ msix_ent[i].entry = i;
+ msix_ent[i].vector = 0;
+ }
+
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
+ if (total_vecs < 0) {
+ rc = -ENODEV;
+ goto msix_setup_exit;
+ }
+
+ bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (bp->irq_tbl) {
+ int tcs;
+
+ /* Trim rings based upon num of vectors allocated */
+ bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
+ bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1) {
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+ if (bp->tx_nr_rings_per_tc == 0) {
+ netdev_reset_tc(dev);
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ } else {
+ int i, off, count;
+
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ for (i = 0; i < tcs; i++) {
+ count = bp->tx_nr_rings_per_tc;
+ off = i * count;
+ netdev_set_tc_queue(dev, i, count, off);
+ }
+ }
+ }
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bp->irq_tbl[i].vector = msix_ent[i].vector;
+ snprintf(bp->irq_tbl[i].name, len,
+ "%s-%s-%d", dev->name, "TxRx", i);
+ bp->irq_tbl[i].handler = bnxt_msix;
+ }
+ rc = bnxt_set_real_num_queues(bp);
+ if (rc)
+ goto msix_setup_exit;
+ } else {
+ rc = -ENOMEM;
+ goto msix_setup_exit;
+ }
+ bp->flags |= BNXT_FLAG_USING_MSIX;
+ kfree(msix_ent);
+ return 0;
+
+msix_setup_exit:
+ netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+ pci_disable_msix(bp->pdev);
+ kfree(msix_ent);
+ return rc;
+}
+
+static int bnxt_setup_inta(struct bnxt *bp)
+{
+ int rc;
+ const int len = sizeof(bp->irq_tbl[0].name);
+
+ if (netdev_get_num_tc(bp->dev))
+ netdev_reset_tc(bp->dev);
+
+ bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ if (!bp->irq_tbl) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ bp->rx_nr_rings = 1;
+ bp->tx_nr_rings = 1;
+ bp->cp_nr_rings = 1;
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+ snprintf(bp->irq_tbl[0].name, len,
+ "%s-%s-%d", bp->dev->name, "TxRx", 0);
+ bp->irq_tbl[0].handler = bnxt_inta;
+ rc = bnxt_set_real_num_queues(bp);
+ return rc;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (bp->flags & BNXT_FLAG_MSIX_CAP)
+ rc = bnxt_setup_msix(bp);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* fallback to INTA */
+ rc = bnxt_setup_inta(bp);
+ }
+ return rc;
+}
+
+static void bnxt_free_irq(struct bnxt *bp)
+{
+ struct bnxt_irq *irq;
+ int i;
+
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+ bp->dev->rx_cpu_rmap = NULL;
+#endif
+ if (!bp->irq_tbl)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ irq = &bp->irq_tbl[i];
+ if (irq->requested)
+ free_irq(irq->vector, bp->bnapi[i]);
+ irq->requested = 0;
+ }
+ if (bp->flags & BNXT_FLAG_USING_MSIX)
+ pci_disable_msix(bp->pdev);
+ kfree(bp->irq_tbl);
+ bp->irq_tbl = NULL;
+}
+
+static int bnxt_request_irq(struct bnxt *bp)
+{
+ int i, rc = 0;
+ unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ flags = IRQF_SHARED;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+ if (rmap && (i < bp->rx_nr_rings)) {
+ rc = irq_cpu_rmap_add(rmap, irq->vector);
+ if (rc)
+ netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+ i);
+ }
+#endif
+ rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+ bp->bnapi[i]);
+ if (rc)
+ break;
+
+ irq->requested = 1;
+ }
+ return rc;
+}
+
+static void bnxt_del_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+
+ napi_hash_del(&bnapi->napi);
+ netif_napi_del(&bnapi->napi);
+ }
+}
+
+static void bnxt_init_napi(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+
+ if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add(bp->dev, &bnapi->napi,
+ bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+ } else {
+ bnapi = bp->bnapi[0];
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ napi_hash_add(&bnapi->napi);
+ }
+}
+
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ napi_disable(&bp->bnapi[i]->napi);
+ bnxt_disable_poll(bp->bnapi[i]);
+ }
+}
+
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnxt_enable_poll(bp->bnapi[i]);
+ napi_enable(&bp->bnapi[i]->napi);
+ }
+}
+
+static void bnxt_tx_disable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ if (bp->bnapi) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ __netif_tx_lock(txq, smp_processor_id());
+ txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ __netif_tx_unlock(txq);
+ }
+ }
+ /* Stop all TX queues */
+ netif_tx_disable(bp->dev);
+ netif_carrier_off(bp->dev);
+}
+
+static void bnxt_tx_enable(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ txq = netdev_get_tx_queue(bp->dev, i);
+ txr->dev_state = 0;
+ }
+ netif_tx_wake_all_queues(bp->dev);
+ if (bp->link_info.link_up)
+ netif_carrier_on(bp->dev);
+}
+
+static void bnxt_report_link(struct bnxt *bp)
+{
+ if (bp->link_info.link_up) {
+ const char *duplex;
+ const char *flow_ctrl;
+ u16 speed;
+
+ netif_carrier_on(bp->dev);
+ if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+ duplex = "full";
+ else
+ duplex = "half";
+ if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+ flow_ctrl = "ON - receive & transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+ flow_ctrl = "ON - transmit";
+ else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+ flow_ctrl = "ON - receive";
+ else
+ flow_ctrl = "none";
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ speed, duplex, flow_ctrl);
+ } else {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ }
+}
+
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcfg_input req = {0};
+ struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u8 link_up = link_info->link_up;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+
+ memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+ link_info->phy_link_status = resp->link;
+ link_info->duplex = resp->duplex;
+ link_info->pause = resp->pause;
+ link_info->auto_mode = resp->auto_mode;
+ link_info->auto_pause_setting = resp->auto_pause;
+ link_info->force_pause_setting = resp->force_pause;
+ link_info->duplex_setting = resp->duplex_setting;
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_speed = le16_to_cpu(resp->link_speed);
+ else
+ link_info->link_speed = 0;
+ link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+ link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
+ link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+ link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+ link_info->phy_ver[0] = resp->phy_maj;
+ link_info->phy_ver[1] = resp->phy_min;
+ link_info->phy_ver[2] = resp->phy_bld;
+ link_info->media_type = resp->media_type;
+ link_info->transceiver = resp->transceiver_type;
+ link_info->phy_addr = resp->phy_addr;
+
+ /* TODO: need to add more logic to report VF link */
+ if (chng_link_state) {
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ link_info->link_up = 1;
+ else
+ link_info->link_up = 0;
+ if (link_up != link_info->link_up)
+ bnxt_report_link(bp);
+ } else {
+ /* always link down if not required to update link state */
+ link_info->link_up = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+ if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+ } else {
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+ if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+ req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+ }
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ u8 autoneg = bp->link_info.autoneg;
+ u16 fw_link_speed = bp->link_info.req_link_speed;
+ u32 advertising = bp->link_info.advertising;
+
+ if (autoneg & BNXT_AUTONEG_SPEED) {
+ req->auto_mode |=
+ PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+ req->enables |= cpu_to_le32(
+ PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+ req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+ req->flags |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+ } else {
+ req->force_link_speed = cpu_to_le16(fw_link_speed);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ }
+
+ /* currently don't support half duplex */
+ req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+ /* tell chimp that the setting takes effect immediately */
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+ bp->link_info.force_link_chng)
+ bnxt_hwrm_set_link_common(bp, &req);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+ /* since changing the pause setting doesn't trigger any link
+ * change event, the driver needs to update the current pause
+ * result upon successful return of the phy_cfg command
+ */
+ bp->link_info.pause =
+ bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+ bp->link_info.auto_pause_setting = 0;
+ if (!bp->link_info.force_link_chng)
+ bnxt_report_link(bp);
+ }
+ bp->link_info.force_link_chng = false;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ if (set_pause)
+ bnxt_hwrm_set_pause_common(bp, &req);
+
+ bnxt_hwrm_set_link_common(bp, &req);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
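+/* Compare the current PHY state reported by the firmware against the
+ * requested settings and issue link and/or pause updates only where
+ * they differ.
+ */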
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+ int rc;
+ bool update_link = false;
+ bool update_pause = false;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ rc = bnxt_update_link(bp, true);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+ if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->auto_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+ link_info->force_pause_setting != link_info->req_flow_ctrl)
+ update_pause = true;
+ if (link_info->req_duplex != link_info->duplex_setting)
+ update_link = true;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ update_link = true;
+ if (link_info->req_link_speed != link_info->force_link_speed)
+ update_link = true;
+ } else {
+ if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+ update_link = true;
+ if (link_info->advertising != link_info->auto_link_speeds)
+ update_link = true;
+ if (link_info->req_link_speed != link_info->auto_link_speed)
+ update_link = true;
+ }
+
+ if (update_link)
+ rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+ else if (update_pause)
+ rc = bnxt_hwrm_set_pause(bp);
+ if (rc) {
+ netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+ return rc;
+}
+
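+/* Core open path: set up the interrupt mode, allocate memory and
+ * irqs, initialize the rings and firmware state, apply PHY settings,
+ * then enable interrupts, tx queues, and the periodic timer.
+ */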
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ netif_carrier_off(bp->dev);
+ if (irq_re_init) {
+ rc = bnxt_setup_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+ rc);
+ return rc;
+ }
+ }
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ /* disable RFS if falling back to INTA */
+ bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+ bp->flags &= ~BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_alloc_mem(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto open_err_free_mem;
+ }
+
+ if (irq_re_init) {
+ bnxt_init_napi(bp);
+ rc = bnxt_request_irq(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+ goto open_err;
+ }
+ }
+
+ bnxt_enable_napi(bp);
+
+ rc = bnxt_init_nic(bp, irq_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto open_err;
+ }
+
+ if (link_re_init) {
+ rc = bnxt_update_phy_setting(bp);
+ if (rc)
+ goto open_err;
+ }
+
+ if (irq_re_init) {
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+ vxlan_get_rx_port(bp->dev);
+#endif
+ if (!bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, htons(0x17c1),
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+ bp->nge_port_cnt = 1;
+ }
+
+ bp->state = BNXT_STATE_OPEN;
+ bnxt_enable_int(bp);
+ /* Enable TX queues */
+ bnxt_tx_enable(bp);
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+ return 0;
+
+open_err:
+ bnxt_disable_napi(bp);
+ bnxt_del_napi(bp);
+
+open_err_free_mem:
+ bnxt_free_skbs(bp);
+ bnxt_free_irq(bp);
+ bnxt_free_mem(bp, true);
+ return rc;
+}
+
+/* rtnl_lock held */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (rc) {
+ netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+ dev_close(bp->dev);
+ }
+ return rc;
+}
+
+static int bnxt_open(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+ rc);
+ rc = -1;
+ return rc;
+ }
+ return __bnxt_open_nic(bp, true, true);
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+ int i;
+
+ atomic_inc(&bp->intr_sem);
+ if (!netif_running(bp->dev))
+ return;
+
+ bnxt_disable_int(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++)
+ synchronize_irq(bp->irq_tbl[i].vector);
+}
+
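+/* Core close path: quiesce tx, flush the rings, disable napi and
+ * interrupts, then free skbs, irqs, and memory as requested.
+ */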
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+ int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (bp->sriov_cfg) {
+ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+ !bp->sriov_cfg,
+ BNXT_SRIOV_CFG_WAIT_TMO);
+ if (rc)
+ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+ }
+#endif
+ /* Change device state to avoid TX queue wake-ups */
+ bnxt_tx_disable(bp);
+
+ bp->state = BNXT_STATE_CLOSED;
+ cancel_work_sync(&bp->sp_task);
+
+ /* Flush rings before disabling interrupts */
+ bnxt_shutdown_nic(bp, irq_re_init);
+
+ /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ del_timer_sync(&bp->timer);
+ bnxt_free_skbs(bp);
+
+ if (irq_re_init) {
+ bnxt_free_irq(bp);
+ bnxt_del_napi(bp);
+ }
+ bnxt_free_mem(bp, irq_re_init);
+ return rc;
+}
+
+static int bnxt_close(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bnxt_close_nic(bp, true, true);
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ /* fallthru */
+ case SIOCGMIIREG: {
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+ }
+
+ case SIOCSMIIREG:
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ return 0;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct rtnl_link_stats64 *
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ u32 i;
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+ if (!bp->bnapi)
+ return stats;
+
+ /* TODO check if we need to synchronize with bnxt_close path */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+ stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+ stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+ stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+ stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+ stats->rx_missed_errors +=
+ le64_to_cpu(hw_stats->rx_discard_pkts);
+
+ stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+ stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
+
+ stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+ }
+
+ return stats;
+}
+
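+/* Copy the netdev multicast list into the default vnic and report
+ * whether it changed.  Falls back to the all-multicast rx mask when
+ * the list overflows BNXT_MAX_MC_ADDRS.
+ */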
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ u8 *haddr;
+ int mc_count = 0;
+ bool update = false;
+ int off = 0;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNXT_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ u32 mask = vnic->rx_mask;
+ bool mc_update = false;
+ bool uc_update;
+
+ if (!netif_running(dev))
+ return;
+
+ mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+ CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+ /* Only allow PF to be in promiscuous mode */
+ if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ uc_update = bnxt_uc_list_updated(bp);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else {
+ mc_update = bnxt_mc_list_updated(bp, &mask);
+ }
+
+ if (mask != vnic->rx_mask || uc_update || mc_update) {
+ vnic->rx_mask = mask;
+
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
+static void bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnxt_uc_list_updated(bp);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct hwrm_cfa_l2_filter_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+ -1);
+
+ req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+ rc);
+ vnic->uc_filter_count = i;
+ }
+ }
+
+skip_uc:
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc)
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ rc);
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 flags = bp->flags;
+ u32 changes;
+ int rc = 0;
+ bool re_init = false;
+ bool update_tpa = false;
+
+ flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+ if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ flags |= BNXT_FLAG_GRO;
+ if (features & NETIF_F_LRO)
+ flags |= BNXT_FLAG_LRO;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ flags |= BNXT_FLAG_STRIP_VLAN;
+
+ if (features & NETIF_F_NTUPLE)
+ flags |= BNXT_FLAG_RFS;
+
+ changes = flags ^ bp->flags;
+ if (changes & BNXT_FLAG_TPA) {
+ update_tpa = true;
+ if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+ (flags & BNXT_FLAG_TPA) == 0)
+ re_init = true;
+ }
+
+ if (changes & ~BNXT_FLAG_TPA)
+ re_init = true;
+
+ if (flags != bp->flags) {
+ u32 old_flags = bp->flags;
+
+ bp->flags = flags;
+
+ if (!netif_running(dev)) {
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return rc;
+ }
+
+ if (re_init) {
+ bnxt_close_nic(bp, false, false);
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+
+ return bnxt_open_nic(bp, false, false);
+ }
+ if (update_tpa) {
+ rc = bnxt_set_tpa(bp,
+ (flags & BNXT_FLAG_TPA) ?
+ true : false);
+ if (rc)
+ bp->flags = old_flags;
+ }
+ }
+ return rc;
+}
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+ int i;
+ struct bnxt_napi *bnapi;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ txr = &bnapi->tx_ring;
+ rxr = &bnapi->rx_ring;
+ cpr = &bnapi->cp_ring;
+ if (netif_msg_drv(bp)) {
+ netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, txr->tx_ring_struct.fw_ring_id,
+ txr->tx_prod, txr->tx_cons);
+ netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+ i, rxr->rx_ring_struct.fw_ring_id,
+ rxr->rx_prod,
+ rxr->rx_agg_ring_struct.fw_ring_id,
+ rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+ netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, cpr->cp_ring_struct.fw_ring_id,
+ cpr->cp_raw_cons);
+ }
+ }
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+ bnxt_dbg_dump_states(bp);
+ if (netif_running(bp->dev))
+ bnxt_tx_disable(bp); /* prevent tx timeout again */
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+ disable_irq(irq->vector);
+ irq->handler(irq->vector, bp->bnapi[i]);
+ enable_irq(irq->vector);
+ }
+}
+#endif
+
+static void bnxt_timer(unsigned long data)
+{
+ struct bnxt *bp = (struct bnxt *)data;
+ struct net_device *dev = bp->dev;
+
+ if (!netif_running(dev))
+ return;
+
+ if (atomic_read(&bp->intr_sem) != 0)
+ goto bnxt_restart_timer;
+
+bnxt_restart_timer:
+ mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
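+/* Slow-path task: handles deferred work flagged in bp->sp_event,
+ * such as rx mode changes, ntuple filters, link changes, forwarded
+ * VF requests, tunnel ports, and reset requests.
+ */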
+static void bnxt_sp_task(struct work_struct *work)
+{
+ struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+ int rc;
+
+ if (bp->state != BNXT_STATE_OPEN)
+ return;
+
+ if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_rx_mode(bp);
+
+ if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+ bnxt_cfg_ntp_filters(bp);
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ rc = bnxt_update_link(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_exec_fwd_req(bp);
+ if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, bp->vxlan_port,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ }
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ bnxt_reset_task(bp);
+}
+
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+ int rc;
+ struct bnxt *bp = netdev_priv(dev);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* enable device (incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ goto init_err;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_disable;
+ }
+
+ rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+ goto init_err_disable;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+ rc = -EIO;
+ goto init_err_release;
+ }
+
+ pci_set_master(pdev);
+
+ bp->dev = dev;
+ bp->pdev = pdev;
+
+ bp->bar0 = pci_ioremap_bar(pdev, 0);
+ if (!bp->bar0) {
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar1 = pci_ioremap_bar(pdev, 2);
+ if (!bp->bar1) {
+ dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ bp->bar2 = pci_ioremap_bar(pdev, 4);
+ if (!bp->bar2) {
+ dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+
+ INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+ spin_lock_init(&bp->ntp_fltr_lock);
+
+ bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+ bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+ bp->coal_bufs = 20;
+ bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+ bp->coal_bufs_irq = 2;
+
+ init_timer(&bp->timer);
+ bp->timer.data = (unsigned long)bp;
+ bp->timer.function = bnxt_timer;
+ bp->current_interval = BNXT_TIMER_INTERVAL;
+
+ bp->state = BNXT_STATE_CLOSED;
+
+ return 0;
+
+init_err_release:
+ if (bp->bar2) {
+ pci_iounmap(pdev, bp->bar2);
+ bp->bar2 = NULL;
+ }
+
+ if (bp->bar1) {
+ pci_iounmap(pdev, bp->bar1);
+ bp->bar1 = NULL;
+ }
+
+ if (bp->bar0) {
+ pci_iounmap(pdev, bp->bar0);
+ bp->bar0 = NULL;
+ }
+
+ pci_release_regions(pdev);
+
+init_err_disable:
+ pci_disable_device(pdev);
+
+init_err:
+ return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (new_mtu < 60 || new_mtu > 9000)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ dev->mtu = new_mtu;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (tc > bp->max_tc) {
+ netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+ tc, bp->max_tc);
+ return -EINVAL;
+ }
+
+ if (netdev_get_num_tc(dev) == tc)
+ return 0;
+
+ if (tc) {
+ int max_rx_rings, max_tx_rings;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
+ return -ENOMEM;
+ }
+
+ /* Need to close the device and do hw resource re-allocation */
+ if (netif_running(bp->dev))
+ bnxt_close_nic(bp, true, false);
+
+ if (tc) {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+ netdev_set_num_tc(dev, tc);
+ } else {
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ netdev_reset_tc(dev);
+ }
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(bp->dev))
+ return bnxt_open_nic(bp, true, false);
+
+ return 0;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+ struct bnxt_ntuple_filter *f2)
+{
+ struct flow_keys *keys1 = &f1->fkeys;
+ struct flow_keys *keys2 = &f2->fkeys;
+
+ if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+ keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+ keys1->ports.ports == keys2->ports.ports &&
+ keys1->basic.ip_proto == keys2->basic.ip_proto &&
+ keys1->basic.n_proto == keys2->basic.n_proto &&
+ ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+ return true;
+
+ return false;
+}
+
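+/* aRFS callback: dissect the flow, check for an existing matching
+ * ntuple filter, and otherwise queue a new one for the slow-path
+ * task to program into the hardware.
+ */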
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ntuple_filter *fltr, *new_fltr;
+ struct flow_keys *fkeys;
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+ int rc = 0, idx;
+ struct hlist_head *head;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ fkeys = &new_fltr->fkeys;
+ if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+ ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+ (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+
+ memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
+ idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (bnxt_fltr_match(fltr, new_fltr)) {
+ rcu_read_unlock();
+ rc = 0;
+ goto err_free;
+ }
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+ BNXT_NTP_FLTR_MAX_FLTR, 0);
+ if (new_fltr->sw_id < 0) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ new_fltr->flow_id = flow_id;
+ new_fltr->rxq = rxq_index;
+ hlist_add_head_rcu(&new_fltr->hash, head);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+
+ set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+
+ return new_fltr->sw_id;
+
+err_free:
+ kfree(new_fltr);
+ return rc;
+}
+
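+/* Walk the ntuple filter hash table, programming new filters into
+ * the hardware and freeing ones that RPS has expired.
+ */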
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_ntuple_filter *fltr;
+ int rc;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ bool del = false;
+
+ if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+ if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ fltr->flow_id,
+ fltr->sw_id)) {
+ bnxt_hwrm_cfa_ntuple_filter_free(bp,
+ fltr);
+ del = true;
+ }
+ } else {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+ fltr);
+ if (rc)
+ del = true;
+ else
+ set_bit(BNXT_FLTR_VALID, &fltr->state);
+ }
+
+ if (del) {
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ hlist_del_rcu(&fltr->hash);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ synchronize_rcu();
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ kfree(fltr);
+ }
+ }
+ }
+}
+
+#else
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+ return;
+
+ bp->vxlan_port_cnt++;
+ if (bp->vxlan_port_cnt == 1) {
+ bp->vxlan_port = port;
+ set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+}
+
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+ __be16 port)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return;
+
+ if (sa_family != AF_INET6 && sa_family != AF_INET)
+ return;
+
+ if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+ bp->vxlan_port_cnt--;
+
+ if (bp->vxlan_port_cnt == 0) {
+ set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+ }
+}
+
+static const struct net_device_ops bnxt_netdev_ops = {
+ .ndo_open = bnxt_open,
+ .ndo_start_xmit = bnxt_start_xmit,
+ .ndo_stop = bnxt_close,
+ .ndo_get_stats64 = bnxt_get_stats64,
+ .ndo_set_rx_mode = bnxt_set_rx_mode,
+ .ndo_do_ioctl = bnxt_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = bnxt_change_mac_addr,
+ .ndo_change_mtu = bnxt_change_mtu,
+ .ndo_fix_features = bnxt_fix_features,
+ .ndo_set_features = bnxt_set_features,
+ .ndo_tx_timeout = bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+ .ndo_get_vf_config = bnxt_get_vf_config,
+ .ndo_set_vf_mac = bnxt_set_vf_mac,
+ .ndo_set_vf_vlan = bnxt_set_vf_vlan,
+ .ndo_set_vf_rate = bnxt_set_vf_bw,
+ .ndo_set_vf_link_state = bnxt_set_vf_link_state,
+ .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnxt_poll_controller,
+#endif
+ .ndo_setup_tc = bnxt_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = bnxt_rx_flow_steer,
+#endif
+ .ndo_add_vxlan_port = bnxt_add_vxlan_port,
+ .ndo_del_vxlan_port = bnxt_del_vxlan_port,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = bnxt_busy_poll,
+#endif
+};
+
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (BNXT_PF(bp))
+ bnxt_sriov_disable(bp);
+
+ unregister_netdev(dev);
+ cancel_work_sync(&bp->sp_task);
+ bp->sp_event = 0;
+
+ bnxt_free_hwrm_resources(bp);
+ pci_iounmap(pdev, bp->bar2);
+ pci_iounmap(pdev, bp->bar1);
+ pci_iounmap(pdev, bp->bar0);
+ free_netdev(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ char phy_ver[PHY_VER_STR_LEN];
+
+ rc = bnxt_update_link(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
+ /* initialize the ethtool settings copy with NVM settings */
+ if (BNXT_AUTO_MODE(link_info->auto_mode))
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl = link_info->auto_pause_setting;
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
+ }
+ link_info->req_duplex = link_info->duplex_setting;
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+ link_info->req_link_speed = link_info->auto_link_speed;
+ else
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->advertising = link_info->auto_link_speeds;
+ snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+ link_info->phy_ver[0],
+ link_info->phy_ver[1],
+ link_info->phy_ver[2]);
+ strcat(bp->fw_ver_str, phy_ver);
+ return rc;
+}
+
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+ u16 ctrl;
+
+ if (!pdev->msix_cap)
+ return 1;
+
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+ int max_rings = 0;
+
+ if (BNXT_PF(bp)) {
+ *max_tx = bp->pf.max_pf_tx_rings;
+ *max_rx = bp->pf.max_pf_rx_rings;
+ max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ *max_tx = bp->vf.max_tx_rings;
+ *max_rx = bp->vf.max_rx_rings;
+ max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+ max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+#endif
+ }
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ *max_rx >>= 1;
+
+ *max_rx = min_t(int, *max_rx, max_rings);
+ *max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int version_printed;
+ struct net_device *dev;
+ struct bnxt *bp;
+ int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+ if (version_printed++ == 0)
+ pr_info("%s", version);
+
+ max_irqs = bnxt_get_max_irq(pdev);
+ dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ if (!dev)
+ return -ENOMEM;
+
+ bp = netdev_priv(dev);
+
+ if (bnxt_vf_pciid(ent->driver_data))
+ bp->flags |= BNXT_FLAG_VF;
+
+ if (pdev->msix_cap) {
+ bp->flags |= BNXT_FLAG_MSIX_CAP;
+ if (BNXT_PF(bp))
+ bp->flags |= BNXT_FLAG_RFS;
+ }
+
+ rc = bnxt_init_board(pdev, dev);
+ if (rc < 0)
+ goto init_err_free;
+
+ dev->netdev_ops = &bnxt_netdev_ops;
+ dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+ dev->ethtool_ops = &bnxt_ethtool_ops;
+
+ pci_set_drvdata(pdev, dev);
+
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ dev->hw_features |= NETIF_F_NTUPLE;
+
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+ dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef CONFIG_BNXT_SRIOV
+ init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc)
+ goto init_err;
+
+ mutex_init(&bp->hwrm_cmd_lock);
+ bnxt_hwrm_ver_get(bp);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ goto init_err;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err;
+ }
+
+ bnxt_set_tpa_flags(bp);
+ bnxt_set_ring_params(bp);
+ dflt_rings = netif_get_num_default_rss_queues();
+ if (BNXT_PF(bp)) {
+ memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ bp->pf.max_irqs = max_irqs;
+ } else {
+#if defined(CONFIG_BNXT_SRIOV)
+ memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ bp->vf.max_irqs = max_irqs;
+#endif
+ }
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+ bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+ rc = bnxt_probe_phy(bp);
+ if (rc)
+ goto init_err;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto init_err;
+
+ netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+ return 0;
+
+init_err:
+ pci_iounmap(pdev, bp->bar0);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+init_err_free:
+ free_netdev(dev);
+ return rc;
+}
+
+static struct pci_driver bnxt_pci_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = bnxt_pci_tbl,
+ .probe = bnxt_init_one,
+ .remove = bnxt_remove_one,
+#if defined(CONFIG_BNXT_SRIOV)
+ .sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+module_pci_driver(bnxt_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
new file mode 100644
index 000000000000..4f2267ca482d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -0,0 +1,1086 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+#define DRV_MODULE_NAME "bnxt_en"
+#define DRV_MODULE_VERSION "0.1.24"
+
+#define DRV_VER_MAJ 0
+#define DRV_VER_MIN 1
+#define DRV_VER_UPD 24
+
+struct tx_bd {
+ __le32 tx_bd_len_flags_type;
+ #define TX_BD_TYPE (0x3f << 0)
+ #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
+ #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
+ #define TX_BD_FLAGS_PACKET_END (1 << 6)
+ #define TX_BD_FLAGS_NO_CMPL (1 << 7)
+ #define TX_BD_FLAGS_BD_CNT (0x1f << 8)
+ #define TX_BD_FLAGS_BD_CNT_SHIFT 8
+ #define TX_BD_FLAGS_LHINT (3 << 13)
+ #define TX_BD_FLAGS_LHINT_SHIFT 13
+ #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
+ #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
+ #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
+ #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
+ #define TX_BD_FLAGS_COAL_NOW (1 << 15)
+ #define TX_BD_LEN (0xffff << 16)
+ #define TX_BD_LEN_SHIFT 16
+
+ u32 tx_bd_opaque;
+ __le64 tx_bd_haddr;
+} __packed;
+
+struct tx_bd_ext {
+ __le32 tx_bd_hsize_lflags;
+ #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
+ #define TX_BD_FLAGS_IP_CKSUM (1 << 1)
+ #define TX_BD_FLAGS_NO_CRC (1 << 2)
+ #define TX_BD_FLAGS_STAMP (1 << 3)
+ #define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4)
+ #define TX_BD_FLAGS_LSO (1 << 5)
+ #define TX_BD_FLAGS_IPID_FMT (1 << 6)
+ #define TX_BD_FLAGS_T_IPID (1 << 7)
+ #define TX_BD_HSIZE (0xff << 16)
+ #define TX_BD_HSIZE_SHIFT 16
+
+ __le32 tx_bd_mss;
+ __le32 tx_bd_cfa_action;
+ #define TX_BD_CFA_ACTION (0xffff << 16)
+ #define TX_BD_CFA_ACTION_SHIFT 16
+
+ __le32 tx_bd_cfa_meta;
+ #define TX_BD_CFA_META_MASK 0xfffffff
+ #define TX_BD_CFA_META_VID_MASK 0xfff
+ #define TX_BD_CFA_META_PRI_MASK (0xf << 12)
+ #define TX_BD_CFA_META_PRI_SHIFT 12
+ #define TX_BD_CFA_META_TPID_MASK (3 << 16)
+ #define TX_BD_CFA_META_TPID_SHIFT 16
+ #define TX_BD_CFA_META_KEY (0xf << 28)
+ #define TX_BD_CFA_META_KEY_SHIFT 28
+ #define TX_BD_CFA_META_KEY_VLAN (1 << 28)
+};
+
+struct rx_bd {
+ __le32 rx_bd_len_flags_type;
+ #define RX_BD_TYPE (0x3f << 0)
+ #define RX_BD_TYPE_RX_PACKET_BD 0x4
+ #define RX_BD_TYPE_RX_BUFFER_BD 0x5
+ #define RX_BD_TYPE_RX_AGG_BD 0x6
+ #define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
+ #define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
+ #define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
+ #define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
+ #define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_EOP (1 << 7)
+ #define RX_BD_FLAGS_BUFFERS (3 << 8)
+ #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
+ #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
+ #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
+ #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
+ #define RX_BD_LEN (0xffff << 16)
+ #define RX_BD_LEN_SHIFT 16
+
+ u32 rx_bd_opaque;
+ __le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+ __le32 tx_cmp_flags_type;
+ #define CMP_TYPE (0x3f << 0)
+ #define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_RX_L2_CMP 17
+ #define CMP_TYPE_RX_AGG_CMP 18
+ #define CMP_TYPE_RX_L2_TPA_START_CMP 19
+ #define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_STATUS_CMP 32
+ #define CMP_TYPE_REMOTE_DRIVER_REQ 34
+ #define CMP_TYPE_REMOTE_DRIVER_RESP 36
+ #define CMP_TYPE_ERROR_STATUS 48
+ #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+
+ #define TX_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_CMP_FLAGS_PUSH (1 << 7)
+
+ u32 tx_cmp_opaque;
+ __le32 tx_cmp_errors_v;
+ #define TX_CMP_V (1 << 0)
+ #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
+ #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
+ #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
+ #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
+ #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
+ #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
+ #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
+ #define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
+ #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
+
+	__le32 tx_cmp_unused_3;
+};
+
+struct rx_cmp {
+ __le32 rx_cmp_len_flags_type;
+ #define RX_CMP_CMP_TYPE (0x3f << 0)
+ #define RX_CMP_FLAGS_ERROR (1 << 6)
+ #define RX_CMP_FLAGS_PLACEMENT (7 << 7)
+ #define RX_CMP_FLAGS_RSS_VALID (1 << 10)
+ #define RX_CMP_FLAGS_UNUSED (1 << 11)
+ #define RX_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
+ #define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
+ #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
+ #define RX_CMP_FLAGS_ITYPE_UDP (3 << 12)
+ #define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12)
+ #define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12)
+ #define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12)
+ #define RX_CMP_LEN (0xffff << 16)
+ #define RX_CMP_LEN_SHIFT 16
+
+ u32 rx_cmp_opaque;
+ __le32 rx_cmp_misc_v1;
+ #define RX_CMP_V1 (1 << 0)
+ #define RX_CMP_AGG_BUFS (0x1f << 1)
+ #define RX_CMP_AGG_BUFS_SHIFT 1
+ #define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+
+ __le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_HASH_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RX_CMP_HASH_TYPE(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+ RX_CMP_RSS_HASH_TYPE_SHIFT)
+
+struct rx_cmp_ext {
+ __le32 rx_cmp_flags2;
+ #define RX_CMP_FLAGS2_IP_CS_CALC 0x1
+ #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
+ __le32 rx_cmp_meta_data;
+ #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
+ #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
+ #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
+ __le32 rx_cmp_cfa_code_errors_v2;
+ #define RX_CMP_V (1 << 0)
+ #define RX_CMPL_ERRORS_MASK (0x7fff << 1)
+ #define RX_CMPL_ERRORS_SFT 1
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4)
+ #define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5)
+ #define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6)
+ #define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7)
+ #define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9)
+ #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9)
+ #define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+ #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12)
+
+ #define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
+ #define RX_CMPL_CFA_CODE_SFT 16
+
+ __le32 rx_cmp_unused3;
+};
+
+#define RX_CMP_L2_ERRORS \
+ cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS \
+ (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS \
+ (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1) \
+ (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \
+ !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_ENCAP(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
+ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+struct rx_agg_cmp {
+ __le32 rx_agg_cmp_len_flags_type;
+ #define RX_AGG_CMP_TYPE (0x3f << 0)
+ #define RX_AGG_CMP_LEN (0xffff << 16)
+ #define RX_AGG_CMP_LEN_SHIFT 16
+ u32 rx_agg_cmp_opaque;
+ __le32 rx_agg_cmp_v;
+ #define RX_AGG_CMP_V (1 << 0)
+ __le32 rx_agg_cmp_unused;
+};
+
+struct rx_tpa_start_cmp {
+ __le32 rx_tpa_start_cmp_len_flags_type;
+ #define RX_TPA_START_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_START_CMP_LEN (0xffff << 16)
+ #define RX_TPA_START_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_start_cmp_opaque;
+ __le32 rx_tpa_start_cmp_misc_v1;
+ #define RX_TPA_START_CMP_V1 (0x1 << 0)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
+ #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
+
+#define TPA_START_AGG_ID(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+struct rx_tpa_start_cmp_ext {
+ __le32 rx_tpa_start_cmp_flags2;
+ #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
+ #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
+ #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
+ #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+
+ __le32 rx_tpa_start_cmp_metadata;
+ __le32 rx_tpa_start_cmp_cfa_code_v2;
+ #define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
+ #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ __le32 rx_tpa_start_cmp_unused5;
+};
+
+struct rx_tpa_end_cmp {
+ __le32 rx_tpa_end_cmp_len_flags_type;
+ #define RX_TPA_END_CMP_TYPE (0x3f << 0)
+ #define RX_TPA_END_CMP_FLAGS (0x3ff << 6)
+ #define RX_TPA_END_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
+ #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
+ #define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12)
+ #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12
+ #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
+ #define RX_TPA_END_CMP_LEN (0xffff << 16)
+ #define RX_TPA_END_CMP_LEN_SHIFT 16
+
+ u32 rx_tpa_end_cmp_opaque;
+ __le32 rx_tpa_end_cmp_misc_v1;
+ #define RX_TPA_END_CMP_V1 (0x1 << 0)
+ #define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1
+ #define RX_TPA_END_CMP_TPA_SEGS (0xff << 8)
+ #define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+
+ __le32 rx_tpa_end_cmp_tsdelta;
+ #define RX_TPA_END_GRO_TS (0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \
+ cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
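+/* GRO_JUMBO (0x5 << 7) ANDed with GRO_HDS (0x6 << 7) leaves the common
+ * GRO bit (0x4 << 7), so TPA_END_GRO() matches either GRO placement.
+ */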
+
+#define TPA_END_GRO(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \
+ RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end) \
+ ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+
+struct rx_tpa_end_cmp_ext {
+ __le32 rx_tpa_end_cmp_dup_acks;
+ #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+
+ __le32 rx_tpa_end_cmp_seg_len;
+ #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
+
+ __le32 rx_tpa_end_cmp_errors_v2;
+ #define RX_TPA_END_CMP_V2 (0x1 << 0)
+ #define RX_TPA_END_CMP_ERRORS (0x7fff << 1)
+ #define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+
+ u32 rx_tpa_end_cmp_start_opaque;
+};
+
+#define DB_IDX_MASK 0xffffff
+#define DB_IDX_VALID (0x1 << 26)
+#define DB_IRQ_DIS (0x1 << 27)
+#define DB_KEY_TX (0x0 << 28)
+#define DB_KEY_RX (0x1 << 28)
+#define DB_KEY_CP (0x2 << 28)
+#define DB_KEY_ST (0x3 << 28)
+#define DB_KEY_TX_PUSH (0x4 << 28)
+#define DB_LONG_TX_PUSH (0x2 << 24)
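+/* A doorbell write carries the producer/consumer index in the low 24
+ * bits (DB_IDX_MASK) and a key in the upper bits selecting the ring
+ * type (TX, RX, completion, statistics or TX push).
+ */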
+
+#define INVALID_HW_RING_ID ((u16)-1)
+
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
+
+/* The hardware supports certain page sizes. Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT 13
+#else
+#define BNXT_PAGE_SHIFT 16
+#endif
+
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
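+/* Ring pages thus range from 4KB to 64KB: sub-4KB kernel pages are
+ * rounded up to 4KB, 4KB/8KB pages are used directly, 16KB/32KB pages
+ * fall back to 8KB, and 64KB (or larger) pages use 64KB.
+ */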
+
+#define BNXT_MIN_PKT_SIZE 45
+
+#define BNXT_NUM_TESTS(bp) 0
+
+#define BNXT_DEFAULT_RX_RING_SIZE 1023
+#define BNXT_DEFAULT_TX_RING_SIZE 512
+
+#define MAX_TPA 64
+
+#define MAX_RX_PAGES 8
+#define MAX_RX_AGG_PAGES 32
+#define MAX_TX_PAGES 8
+#define MAX_CP_PAGES 64
+
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+
+#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
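+/* Example with 4KB ring pages: a 16-byte struct rx_bd gives
+ * RX_DESC_CNT = 4096 / 16 = 256, so RX_RING(x) reduces to x >> 8 (the
+ * page holding descriptor x) and RX_IDX(x) to x & 255 (its slot within
+ * that page).
+ */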
+
+#define TX_CMP_VALID(txcmp, raw_cons) \
+ (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons) \
+ (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+ !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons) \
+ (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
+ !((raw_cons) & bp->cp_bit))
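+/* The hardware toggles the completion valid (V) bit on every pass
+ * around the ring.  The raw consumer index keeps an extra wrap bit
+ * (bp->cp_bit) that flips per lap, so an entry is new when its V bit
+ * is the complement of the wrap bit.
+ */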
+
+#define TX_CMP_TYPE(txcmp) \
+ (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define HWRM_CMD_TIMEOUT 500
+#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK 0xffff
+#define HWRM_RESP_LEN_MASK 0xffff0000
+#define HWRM_RESP_LEN_SFT 16
+#define HWRM_RESP_VALID_MASK 0xff000000
+#define BNXT_HWRM_REQ_MAX_SIZE 128
+#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
+ BNXT_HWRM_REQ_MAX_SIZE)
+
+struct bnxt_sw_tx_bd {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ u8 is_gso;
+ u8 is_push;
+ unsigned short nr_frags;
+};
+
+struct bnxt_sw_rx_bd {
+ u8 *data;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnxt_sw_rx_agg_bd {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
+struct bnxt_ring_struct {
+ int nr_pages;
+ int page_size;
+ void **pg_arr;
+ dma_addr_t *dma_arr;
+
+ __le64 *pg_tbl;
+ dma_addr_t pg_tbl_map;
+
+ int vmem_size;
+ void **vmem;
+
+ u16 fw_ring_id; /* Ring id filled by Chimp FW */
+ u8 queue_id;
+};
+
+struct tx_push_bd {
+ __le32 doorbell;
+ struct tx_bd txbd1;
+ struct tx_bd_ext txbd2;
+};
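+/* For small packets the driver can use TX push: the tx_push_bd contents
+ * (doorbell word plus both BDs) and the packet data are written straight
+ * through the doorbell BAR, saving the chip a DMA read of the BDs.
+ */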
+
+struct bnxt_tx_ring_info {
+ u16 tx_prod;
+ u16 tx_cons;
+ void __iomem *tx_doorbell;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnxt_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ struct tx_push_bd *tx_push;
+ dma_addr_t tx_push_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+ u32 dev_state;
+
+ struct bnxt_ring_struct tx_ring_struct;
+};
+
+struct bnxt_tpa_info {
+ u8 *data;
+ dma_addr_t mapping;
+ u16 len;
+ unsigned short gso_type;
+ u32 flags2;
+ u32 metadata;
+ enum pkt_hash_types hash_type;
+ u32 rss_hash;
+};
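+/* TPA (transparent packet aggregation) is the hardware LRO/GRO engine.
+ * A TPA_START completion claims one of MAX_TPA contexts; this struct
+ * holds the first buffer and its metadata until the matching TPA_END
+ * completion delivers the aggregated packet.
+ */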
+
+struct bnxt_rx_ring_info {
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ void __iomem *rx_doorbell;
+ void __iomem *rx_agg_doorbell;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnxt_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnxt_sw_rx_agg_bd *rx_agg_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnxt_tpa_info *rx_tpa;
+
+ struct bnxt_ring_struct rx_ring_struct;
+ struct bnxt_ring_struct rx_agg_ring_struct;
+};
+
+struct bnxt_cp_ring_info {
+ u32 cp_raw_cons;
+ void __iomem *cp_doorbell;
+
+ struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+
+ dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
+
+ struct ctx_hw_stats *hw_stats;
+ dma_addr_t hw_stats_map;
+ u32 hw_stats_ctx_id;
+ u64 rx_l4_csum_errors;
+
+ struct bnxt_ring_struct cp_ring_struct;
+};
+
+struct bnxt_napi {
+ struct napi_struct napi;
+ struct bnxt *bp;
+
+ int index;
+ struct bnxt_cp_ring_info cp_ring;
+ struct bnxt_rx_ring_info rx_ring;
+ struct bnxt_tx_ring_info tx_ring;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ atomic_t poll_state;
+#endif
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum bnxt_poll_state_t {
+ BNXT_STATE_IDLE = 0,
+ BNXT_STATE_NAPI,
+ BNXT_STATE_POLL,
+ BNXT_STATE_DISABLE,
+};
+#endif
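+/* Only one context may own a bnapi at a time: NAPI poll and busy poll
+ * each cmpxchg() poll_state from IDLE to their own state and back off
+ * if the other side holds it, while DISABLE spins until it wins the
+ * transition, locking both out.
+ */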
+
+struct bnxt_irq {
+ irq_handler_t handler;
+ unsigned int vector;
+ u8 requested;
+ char name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+
+#define INVALID_STATS_CTX_ID -1
+
+struct hwrm_cmd_req_hdr {
+#define HWRM_CMPL_RING_MASK 0xffff0000
+#define HWRM_CMPL_RING_SFT 16
+ __le32 cmpl_ring_req_type;
+#define HWRM_SEQ_ID_MASK 0xffff
+#define HWRM_SEQ_ID_INVALID -1
+#define HWRM_RESP_LEN_OFFSET 4
+#define HWRM_TARGET_FID_MASK 0xffff0000
+#define HWRM_TARGET_FID_SFT 16
+ __le32 target_id_seq_id;
+ __le64 resp_addr;
+};
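+/* Every HWRM (Hardware Resource Manager) firmware request begins with
+ * this header: a completion ring and request type, a sequence ID, and
+ * the DMA address where the firmware should write its response.
+ */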
+
+struct bnxt_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 cp_fw_ring_id;
+};
+
+struct bnxt_vnic_info {
+ u16 fw_vnic_id; /* returned by Chimp during alloc */
+ u16 fw_rss_cos_lb_ctx;
+ u16 fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS 4
+ __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ /* index 0 always dev_addr */
+ u16 uc_filter_count;
+ u8 *uc_list;
+
+ u16 *fw_grp_ids;
+ u16 hash_type;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNXT_VNIC_RSS_FLAG 1
+#define BNXT_VNIC_RFS_FLAG 2
+#define BNXT_VNIC_MCAST_FLAG 4
+#define BNXT_VNIC_UCAST_FLAG 8
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+ u16 fw_fid;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings;
+ u16 max_rx_rings;
+ u16 max_l2_ctxs;
+ u16 max_irqs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u16 vlan;
+ u32 flags;
+#define BNXT_VF_QOS 0x1
+#define BNXT_VF_SPOOFCHK 0x2
+#define BNXT_VF_LINK_FORCED 0x4
+#define BNXT_VF_LINK_UP 0x8
+ u32 func_flags; /* func cfg flags */
+ u32 min_tx_rate;
+ u32 max_tx_rate;
+ void *hwrm_cmd_req_addr;
+ dma_addr_t hwrm_cmd_req_dma_addr;
+};
+#endif
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID 1
+#define BNXT_FIRST_VF_FID 128
+ u32 fw_fid;
+ u8 port_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 max_rsscos_ctxs;
+ u16 max_cp_rings;
+ u16 max_tx_rings; /* HW assigned max tx rings for this PF */
+ u16 max_pf_tx_rings; /* runtime max tx rings owned by PF */
+ u16 max_rx_rings; /* HW assigned max rx rings for this PF */
+ u16 max_pf_rx_rings; /* runtime max rx rings owned by PF */
+ u16 max_irqs;
+ u16 max_l2_ctxs;
+ u16 max_vnics;
+ u16 max_stat_ctxs;
+ u32 first_vf_id;
+ u16 active_vfs;
+ u16 max_vfs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
+ unsigned long *vf_event_bmap;
+ u16 hwrm_cmd_req_pages;
+ void *hwrm_cmd_req_addr[4];
+ dma_addr_t hwrm_cmd_req_dma_addr[4];
+ struct bnxt_vf_info *vf;
+};
+
+struct bnxt_ntuple_filter {
+ struct hlist_node hash;
+ u8 src_mac_addr[ETH_ALEN];
+ struct flow_keys fkeys;
+ __le64 filter_id;
+ u16 sw_id;
+ u16 rxq;
+ u32 flow_id;
+ unsigned long state;
+#define BNXT_FLTR_VALID 0
+#define BNXT_FLTR_UPDATE 1
+};
+
+#define BNXT_ALL_COPPER_ETHTOOL_SPEED \
+ (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \
+ ADVERTISED_10000baseT_Full)
+
+struct bnxt_link_info {
+ u8 media_type;
+ u8 transceiver;
+ u8 phy_addr;
+ u8 phy_link_status;
+#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 wire_speed;
+ u8 loop_back;
+ u8 link_up;
+ u8 duplex;
+#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF
+#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL
+ u8 pause;
+#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \
+ PORT_PHY_QCFG_RESP_PAUSE_TX)
+ u8 auto_pause_setting;
+ u8 force_pause_setting;
+ u8 duplex_setting;
+ u8 auto_mode;
+#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \
+ (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define PHY_VER_LEN 3
+ u8 phy_ver[PHY_VER_LEN];
+ u16 link_speed;
+#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+ u16 support_speeds;
+ u16 auto_link_speeds;
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 auto_link_speed;
+ u16 force_link_speed;
+ u32 preemphasis;
+
+ /* copy of requested setting from ethtool cmd */
+ u8 autoneg;
+#define BNXT_AUTONEG_SPEED 1
+#define BNXT_AUTONEG_FLOW_CTRL 2
+ u8 req_duplex;
+ u8 req_flow_ctrl;
+ u16 req_link_speed;
+ u32 advertising;
+ bool force_link_chng;
+ /* a copy of phy_qcfg output used to report link
+ * info to VF
+ */
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE 8
+
+struct bnxt_queue_info {
+ u8 queue_id;
+ u8 queue_profile;
+};
+
+struct bnxt {
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *bar2;
+
+ u32 reg_base;
+
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ atomic_t intr_sem;
+
+ u32 flags;
+ #define BNXT_FLAG_DCB_ENABLED 0x1
+ #define BNXT_FLAG_VF 0x2
+ #define BNXT_FLAG_LRO 0x4
+#ifdef CONFIG_INET
+ #define BNXT_FLAG_GRO 0x8
+#else
+ /* Cannot support hardware GRO if CONFIG_INET is not set */
+ #define BNXT_FLAG_GRO 0x0
+#endif
+ #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+ #define BNXT_FLAG_JUMBO 0x10
+ #define BNXT_FLAG_STRIP_VLAN 0x20
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO)
+ #define BNXT_FLAG_USING_MSIX 0x40
+ #define BNXT_FLAG_MSIX_CAP 0x80
+ #define BNXT_FLAG_RFS 0x100
+ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
+ BNXT_FLAG_RFS | \
+ BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+
+ struct bnxt_napi **bnapi;
+
+ u32 rx_buf_size;
+	u32			rx_buf_use_size; /* usable size */
+ u32 rx_ring_size;
+ u32 rx_agg_ring_size;
+ u32 rx_copy_thresh;
+ u32 rx_ring_mask;
+ u32 rx_agg_ring_mask;
+ int rx_nr_pages;
+ int rx_agg_nr_pages;
+ int rx_nr_rings;
+ int rsscos_nr_ctxs;
+
+ u32 tx_ring_size;
+ u32 tx_ring_mask;
+ int tx_nr_pages;
+ int tx_nr_rings;
+ int tx_nr_rings_per_tc;
+
+ int tx_wake_thresh;
+ int tx_push_thresh;
+ int tx_push_size;
+
+ u32 cp_ring_size;
+ u32 cp_ring_mask;
+ u32 cp_bit;
+ int cp_nr_pages;
+ int cp_nr_rings;
+
+ int num_stat_ctxs;
+ struct bnxt_ring_grp_info *grp_info;
+ struct bnxt_vnic_info *vnic_info;
+ int nr_vnics;
+
+ u8 max_tc;
+ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
+
+ unsigned int current_interval;
+#define BNXT_TIMER_INTERVAL (HZ / 2)
+
+ struct timer_list timer;
+
+ int state;
+#define BNXT_STATE_CLOSED 0
+#define BNXT_STATE_OPEN 1
+
+ struct bnxt_irq *irq_tbl;
+ u8 mac_addr[ETH_ALEN];
+
+ u32 msg_enable;
+
+ u16 hwrm_cmd_seq;
+ u32 hwrm_intr_seq_id;
+ void *hwrm_cmd_resp_addr;
+ dma_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_dbg_resp_addr;
+ dma_addr_t hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE 128
+ struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
+ struct hwrm_ver_get_output ver_resp;
+#define FW_VER_STR_LEN 32
+#define BC_HWRM_STR_LEN 21
+#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+ char fw_ver_str[FW_VER_STR_LEN];
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __le16 vxlan_fw_dst_port_id;
+ u8 nge_port_cnt;
+ __le16 nge_fw_dst_port_id;
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+
+#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
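+/* The 25/2 factor means the coalescing timer ticks 12.5 times per
+ * microsecond (an 80 ns tick); e.g. 100 usec converts to 1250 timer
+ * units and back.
+ */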
+
+ struct work_struct sp_task;
+ unsigned long sp_event;
+#define BNXT_RX_MASK_SP_EVENT 0
+#define BNXT_RX_NTP_FLTR_SP_EVENT 1
+#define BNXT_LINK_CHNG_SP_EVENT 2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 4
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT 8
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT 16
+#define BNXT_RESET_TASK_SP_EVENT 32
+#define BNXT_RST_RING_SP_EVENT 64
+
+ struct bnxt_pf_info pf;
+#ifdef CONFIG_BNXT_SRIOV
+ int nr_vfs;
+ struct bnxt_vf_info vf;
+ wait_queue_head_t sriov_cfg_wait;
+ bool sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
+#endif
+
+#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_NTP_FLTR_HASH_SIZE 512
+#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
+ struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+ spinlock_t ntp_fltr_lock; /* for hash table add, del */
+
+ unsigned long *ntp_fltr_bmap;
+ int ntp_fltr_count;
+
+ struct bnxt_link_info link_info;
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the NAPI poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_NAPI);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the busy poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_POLL);
+
+ return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+ atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+ int old;
+
+ while (1) {
+ old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+ BNXT_STATE_DISABLE);
+ if (old == BNXT_STATE_IDLE)
+ break;
+ usleep_range(500, 5000);
+ }
+}
+
+#else
+
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+ return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+ return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
+
+void bnxt_set_ring_params(struct bnxt *);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_max_rings(struct bnxt *, int *, int *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
new file mode 100644
index 000000000000..45bd628eaf3a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -0,0 +1,1149 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
+#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return bp->msg_enable;
+}
+
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ bp->msg_enable = value;
+}
+
+static int bnxt_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ memset(coal, 0, sizeof(*coal));
+
+ coal->rx_coalesce_usecs =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
+ coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
+ coal->rx_coalesce_usecs_irq =
+ max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
+ coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+
+ return 0;
+}
+
+static int bnxt_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
+ bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
+ bp->coal_ticks_irq =
+ BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
+ bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_coal(bp);
+
+ return rc;
+}
+
+#define BNXT_NUM_STATS 21
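+/* Per completion ring: the hardware counters copied out of struct
+ * ctx_hw_stats plus one software counter (rx_l4_csum_errors).  This
+ * count must match the string table in bnxt_get_strings().
+ */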
+
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return BNXT_NUM_STATS * bp->cp_nr_rings;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ u32 i, j = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
+ u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+
+ memset(buf, 0, buf_size);
+
+ if (!bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ __le64 *hw_stats = (__le64 *)cpr->hw_stats;
+ int k;
+
+ for (k = 0; k < stat_fields; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j++] = cpr->rx_l4_csum_errors;
+ }
+}
+
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 i;
+
+ switch (stringset) {
+ /* The number of strings must match BNXT_NUM_STATS defined above. */
+ case ETH_SS_STATS:
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ sprintf(buf, "[%d]: rx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_discards", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_drops", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_ucast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_mcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tx_bcast_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_packets", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_bytes", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_events", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: tpa_aborts", i);
+ buf += ETH_GSTRING_LEN;
+ sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+ buf += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+ stringset);
+ break;
+ }
+}
+
+static void bnxt_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+ ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+
+ ering->rx_pending = bp->rx_ring_size;
+ ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+ ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnxt_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
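+	/* A TX ring must be able to hold at least one maximally
+	 * fragmented skb (MAX_SKB_FRAGS + 1 descriptors), hence the
+	 * lower bound on tx_pending.
+	 */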
+ if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+ (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+ (ering->tx_pending <= MAX_SKB_FRAGS))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ bnxt_close_nic(bp, false, false);
+
+ bp->rx_ring_size = ering->rx_pending;
+ bp->tx_ring_size = ering->tx_pending;
+ bnxt_set_ring_params(bp);
+
+ if (netif_running(dev))
+ return bnxt_open_nic(bp, false, false);
+
+ return 0;
+}
+
+static void bnxt_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ channel->max_rx = max_rx_rings;
+ channel->max_tx = max_tx_rings;
+ channel->max_other = 0;
+ channel->max_combined = 0;
+ channel->rx_count = bp->rx_nr_rings;
+ channel->tx_count = bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int max_rx_rings, max_tx_rings, tcs;
+	int rc = 0;
+
+ if (channel->other_count || channel->combined_count ||
+ !channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
+ max_tx_rings /= tcs;
+
+ if (channel->rx_count > max_rx_rings ||
+ channel->tx_count > max_tx_rings)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ if (BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * before PF unload
+			 */
+ }
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ netdev_err(bp->dev, "Set channel failure rc :%x\n",
+ rc);
+ return rc;
+ }
+ }
+
+ bp->rx_nr_rings = channel->rx_count;
+ bp->tx_nr_rings_per_tc = channel->tx_count;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ if (tcs > 1)
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+ bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+
+ if (netif_running(dev)) {
+ rc = bnxt_open_nic(bp, true, false);
+ if ((!rc) && BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * to re-enable
+			 */
+ }
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int i, j = 0;
+
+ cmd->data = bp->ntp_fltr_count;
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct bnxt_ntuple_filter *fltr;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (j == cmd->rule_cnt)
+ break;
+ rule_locs[j++] = fltr->sw_id;
+ }
+ rcu_read_unlock();
+ if (j == cmd->rule_cnt)
+ break;
+ }
+ cmd->rule_cnt = j;
+ return 0;
+}
+
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_ntuple_filter *fltr;
+ struct flow_keys *fkeys;
+ int i, rc = -EINVAL;
+
+	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ return rc;
+
+ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->sw_id == fs->location)
+ goto fltr_found;
+ }
+ rcu_read_unlock();
+ }
+ return rc;
+
+fltr_found:
+ fkeys = &fltr->fkeys;
+ if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ fs->flow_type = TCP_V4_FLOW;
+ else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ fs->flow_type = UDP_V4_FLOW;
+ else
+ goto fltr_err;
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+
+ fs->ring_cookie = fltr->rxq;
+ rc = 0;
+
+fltr_err:
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = bp->rx_nr_rings;
+ break;
+
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = bp->ntp_fltr_count;
+ cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ break;
+
+ case ETHTOOL_GRXCLSRLALL:
+ rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
+ break;
+
+ case ETHTOOL_GRXCLSRULE:
+ rc = bnxt_grxclsrule(bp, cmd);
+ break;
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+#endif
+
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+ return HW_HASH_INDEX_SIZE;
+}
+
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+ return HW_HASH_KEY_SIZE;
+}
+
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int i = 0;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (indir)
+ for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+ indir[i] = le16_to_cpu(vnic->rss_table[i]);
+
+ if (key)
+ memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ return 0;
+}
+
+static void bnxt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ info->testinfo_len = BNXT_NUM_TESTS(bp);
+ /* TODO CHIMP_FW: eeprom dump details */
+ info->eedump_len = 0;
+ /* TODO CHIMP FW: reg dump details */
+ info->regdump_len = 0;
+}
+
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->support_speeds;
+ u32 speed_mask = 0;
+
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= SUPPORTED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= SUPPORTED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= SUPPORTED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= SUPPORTED_10000baseT_Full;
+ /* TODO: support 25GB, 50GB with different cable type */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full;
+
+ return speed_mask;
+}
+
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->auto_link_speeds;
+ u32 speed_mask = 0;
+
+ /* TODO: support 25GB, 40GB, 50GB with different cable type */
+ /* set the advertised speeds */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+ speed_mask |= ADVERTISED_100baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+ speed_mask |= ADVERTISED_1000baseT_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+ speed_mask |= ADVERTISED_2500baseX_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+ speed_mask |= ADVERTISED_10000baseT_Full;
+	/* TODO: how to advertise 20, 25, 40, 50GB with different cable types? */
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+ speed_mask |= ADVERTISED_20000baseMLD2_Full |
+ ADVERTISED_20000baseKR2_Full;
+ if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+ speed_mask |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_40000baseCR4_Full |
+ ADVERTISED_40000baseSR4_Full |
+ ADVERTISED_40000baseLR4_Full;
+ return speed_mask;
+}
+
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+ switch (fw_link_speed) {
+ case BNXT_LINK_SPEED_100MB:
+ return SPEED_100;
+ case BNXT_LINK_SPEED_1GB:
+ return SPEED_1000;
+ case BNXT_LINK_SPEED_2_5GB:
+ return SPEED_2500;
+ case BNXT_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case BNXT_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case BNXT_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case BNXT_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case BNXT_LINK_SPEED_50GB:
+ return SPEED_50000;
+ default:
+ return SPEED_UNKNOWN;
+ }
+}
+
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 ethtool_speed;
+
+ cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+ if (link_info->auto_link_speeds)
+ cmd->supported |= SUPPORTED_Autoneg;
+
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ cmd->advertising =
+ bnxt_fw_to_ethtool_advertised_spds(link_info);
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising = 0;
+ }
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->advertising |= ADVERTISED_Pause;
+ }
+ } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+ BNXT_LINK_PAUSE_BOTH) {
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->supported |= SUPPORTED_Asym_Pause;
+ if (link_info->force_pause_setting &
+ BNXT_LINK_PAUSE_RX)
+ cmd->supported |= SUPPORTED_Pause;
+ }
+ }
+
+ cmd->port = PORT_NONE;
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ cmd->port = PORT_TP;
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_TP;
+ } else {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_FIBRE;
+
+ if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ cmd->port = PORT_DA;
+ else if (link_info->media_type ==
+ PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+ cmd->port = PORT_FIBRE;
+ }
+
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
+ ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ ethtool_cmd_speed_set(cmd, ethtool_speed);
+ if (link_info->transceiver ==
+ PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+ cmd->transceiver = XCVR_INTERNAL;
+ else
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->phy_address = link_info->phy_addr;
+
+ return 0;
+}
+
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+ switch (ethtool_speed) {
+ case SPEED_100:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ case SPEED_1000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ case SPEED_2500:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ case SPEED_10000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ case SPEED_20000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ case SPEED_25000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ case SPEED_40000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ case SPEED_50000:
+ return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ default:
+ netdev_err(dev, "unsupported speed!\n");
+ break;
+ }
+ return 0;
+}
+
+static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+ u16 fw_speed_mask = 0;
+
+	/* only support autoneg at speeds of 100, 1000, and 10000 Mbps */
+ if (advertising & (ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
+ }
+ if (advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half)) {
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
+ }
+ if (advertising & ADVERTISED_10000baseT_Full)
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+
+ return fw_speed_mask;
+}
+
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u32 speed, fw_advertising = 0;
+ bool set_pause = false;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ netdev_err(dev, "Media type doesn't support autoneg\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
+ ADVERTISED_Autoneg |
+ ADVERTISED_TP |
+ ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause)) {
+ netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+ if (fw_advertising & ~link_info->support_speeds) {
+ netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
+ cmd->advertising);
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
+ if (!fw_advertising)
+ link_info->advertising = link_info->support_speeds;
+ else
+ link_info->advertising = fw_advertising;
+		/* any change to autoneg will cause a link change, so the
+		 * driver should put the original pause setting back into the
+		 * autoneg advertisement
+		 */
+ set_pause = true;
+ } else {
+ /* TODO: currently don't support half duplex */
+ if (cmd->duplex == DUPLEX_HALF) {
+ netdev_err(dev, "HALF DUPLEX is not supported!\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+		/* If we receive a request for an unknown duplex, assume full */
+ if (cmd->duplex == DUPLEX_UNKNOWN)
+ cmd->duplex = DUPLEX_FULL;
+ speed = ethtool_cmd_speed(cmd);
+ link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+ link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->advertising = 0;
+ }
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+
+set_setting_exit:
+ return rc;
+}
+
+static void bnxt_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return;
+ epause->autoneg = !!(link_info->auto_pause_setting &
+ BNXT_LINK_PAUSE_BOTH);
+ epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
+ epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+}
+
+static int bnxt_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ int rc = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ return rc;
+
+ if (epause->autoneg) {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+ } else {
+ /* when transition from auto pause to force pause,
+ * force a link change
+ */
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->force_link_chng = true;
+ link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+ }
+ if (epause->rx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
+
+ if (epause->tx_pause)
+ link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+ else
+ link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_pause(bp);
+ return rc;
+}
+
+static u32 bnxt_get_link(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ /* TODO: handle MF, VF, driver close case */
+ return bp->link_info.link_up;
+}
+
+static int bnxt_flash_nvram(struct net_device *dev,
+ u16 dir_type,
+ u16 dir_ordinal,
+ u16 dir_ext,
+ u16 dir_attr,
+ const u8 *data,
+ size_t data_len)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_write_input req = {0};
+ dma_addr_t dma_handle;
+ u8 *kmem;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+ req.dir_type = cpu_to_le16(dir_type);
+ req.dir_ordinal = cpu_to_le16(dir_ordinal);
+ req.dir_ext = cpu_to_le16(dir_ext);
+ req.dir_attr = cpu_to_le16(dir_attr);
+ req.dir_data_length = cpu_to_le32(data_len);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+ GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)data_len);
+ return -ENOMEM;
+ }
+ memcpy(kmem, data, data_len);
+ req.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+ return rc;
+}
+
+static int bnxt_flash_firmware(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ int rc = 0;
+ u16 code_type;
+ u32 stored_crc;
+ u32 calculated_crc;
+ struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+ switch (dir_type) {
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ code_type = CODE_BOOT;
+ break;
+ default:
+ netdev_err(dev, "Unsupported directory entry type: %u\n",
+ dir_type);
+ return -EINVAL;
+ }
+ if (fw_size < sizeof(struct bnxt_fw_header)) {
+ netdev_err(dev, "Invalid firmware file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+ netdev_err(dev, "Invalid firmware signature: %08X\n",
+ le32_to_cpu(header->signature));
+ return -EINVAL;
+ }
+ if (header->code_type != code_type) {
+ netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+ code_type, header->code_type);
+ return -EINVAL;
+ }
+ if (header->device != DEVICE_CUMULUS_FAMILY) {
+ netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+ DEVICE_CUMULUS_FAMILY, header->device);
+ return -EINVAL;
+ }
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+ if (rc == 0) { /* Firmware update successful */
+ /* TODO: Notify the processor that it needs to reset itself */
+ }
+ return rc;
+}
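+
+/* Image layout expected by bnxt_flash_firmware() above, as implied by its
+ * checks (a sketch derived from the code, not a normative spec): a
+ * struct bnxt_fw_header at the start and a trailing little-endian CRC32
+ * computed over everything that precedes it:
+ *
+ *	[struct bnxt_fw_header][firmware payload][__le32 crc32]
+ */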
+
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ case BNX_DIR_TYPE_BOOTCODE:
+ case BNX_DIR_TYPE_BOOTCODE_2:
+ case BNX_DIR_TYPE_APE_FW:
+ case BNX_DIR_TYPE_APE_PATCH:
+ case BNX_DIR_TYPE_KONG_FW:
+ case BNX_DIR_TYPE_KONG_PATCH:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+{
+ switch (dir_type) {
+ case BNX_DIR_TYPE_AVS:
+ case BNX_DIR_TYPE_EXP_ROM_MBA:
+ case BNX_DIR_TYPE_PCIE:
+ case BNX_DIR_TYPE_TSCF_UCODE:
+ case BNX_DIR_TYPE_EXT_PHY:
+ case BNX_DIR_TYPE_CCM:
+ case BNX_DIR_TYPE_ISCSI_BOOT:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+ case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ return true;
+ }
+
+ return false;
+}
+
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+ return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+ bnxt_dir_type_is_unprotected_exec_format(dir_type);
+}
+
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+ u16 dir_type,
+ const char *filename)
+{
+ const struct firmware *fw;
+ int rc;
+
+ if (!bnxt_dir_type_is_executable(dir_type))
+ return -EINVAL;
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "Error %d requesting firmware file: %s\n",
+ rc, filename);
+ return rc;
+ }
+ if (bnxt_dir_type_is_ape_bin_format(dir_type))
+ rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw->data, fw->size);
+ release_firmware(fw);
+ return rc;
+}
+
+static int bnxt_flash_package_from_file(struct net_device *dev,
+ char *filename)
+{
+ netdev_err(dev, "packages are not yet supported\n");
+ return -EINVAL;
+}
+
+static int bnxt_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
+ netdev_err(dev, "flashdev not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
+ return bnxt_flash_package_from_file(dev, flash->data);
+
+ return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
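+
+/* For reference, a hedged sketch of reaching bnxt_flash_device() through
+ * the standard ethtool flash interface ("eth0" and "bc.bin" are assumed
+ * examples):
+ *
+ *	ethtool -f eth0 bc.bin <region>
+ *
+ * where <region> selects the NVM directory entry type; a region of
+ * ETHTOOL_FLASH_ALL_REGIONS requests a full-package flash, which this
+ * version rejects as unsupported.
+ */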
+
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_get_dir_info_input req = {0};
+ struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ *entries = le32_to_cpu(output->entries);
+ *length = le32_to_cpu(output->entry_length);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+ /* The -1 return value allows the entire 32-bit range of offsets to be
+ * passed via the ethtool command-line utility.
+ */
+ return -1;
+}
+
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u32 dir_entries;
+ u32 entry_length;
+ u8 *buf;
+ size_t buflen;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_get_dir_entries_input req = {0};
+
+ rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+ if (rc != 0)
+ return rc;
+
+ /* Insert 2 bytes of directory info (count and size of entries) */
+ if (len < 2)
+ return -EINVAL;
+
+ *data++ = dir_entries;
+ *data++ = entry_length;
+ len -= 2;
+ memset(data, 0xff, len);
+
+ buflen = dir_entries * entry_length;
+ buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned int)buflen);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, len > buflen ? buflen : len);
+ dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+ return rc;
+}
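+
+/* Note on bnxt_get_nvram_directory() above: the two-byte header stores the
+ * entry count and entry size as single bytes, truncating the 32-bit values
+ * reported by firmware; any directory bytes beyond the caller's length are
+ * simply not copied.
+ */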
+
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ u8 *buf;
+ dma_addr_t dma_handle;
+ struct hwrm_nvm_read_input req = {0};
+
+ buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
+ GFP_KERNEL);
+ if (!buf) {
+ netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+ (unsigned int)length);
+ return -ENOMEM;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+ req.host_dest_addr = cpu_to_le64(dma_handle);
+ req.dir_idx = cpu_to_le16(index);
+ req.offset = cpu_to_le32(offset);
+ req.len = cpu_to_le32(length);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0)
+ memcpy(data, buf, length);
+ dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+ return rc;
+}
+
+static int bnxt_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ u32 index;
+ u32 offset;
+
+ if (eeprom->offset == 0) /* special offset value to get directory */
+ return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
+ index = eeprom->offset >> 24;
+ offset = eeprom->offset & 0xffffff;
+
+ if (index == 0) {
+ netdev_err(dev, "unsupported index value: %d\n", index);
+ return -EINVAL;
+ }
+
+ return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
+}
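+
+/* The eeprom->offset encoding accepted by bnxt_get_eeprom() above, as
+ * implemented: offset 0 returns the NVM directory; otherwise bits 31:24
+ * hold the 1-based directory index and bits 23:0 the byte offset within
+ * that item. A hypothetical example ("eth0" assumed):
+ *
+ *	ethtool -e eth0 offset 0x01000000 length 64
+ *
+ * reads the first 64 bytes of the first NVM directory item.
+ */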
+
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_erase_dir_entry_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+ req.dir_idx = cpu_to_le16(index);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u8 index, dir_op;
+ u16 type, ext, ordinal, attr;
+
+ if (!BNXT_PF(bp)) {
+ netdev_err(dev, "NVM write not supported from a virtual function\n");
+ return -EINVAL;
+ }
+
+ type = eeprom->magic >> 16;
+
+ if (type == 0xffff) { /* special value for directory operations */
+ index = eeprom->magic & 0xff;
+ dir_op = eeprom->magic >> 8;
+ if (index == 0)
+ return -EINVAL;
+ switch (dir_op) {
+ case 0x0e: /* erase */
+ if (eeprom->offset != ~eeprom->magic)
+ return -EINVAL;
+ return bnxt_erase_nvram_directory(dev, index - 1);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Create or re-write an NVM item: */
+ if (bnxt_dir_type_is_executable(type))
+ return -EINVAL;
+ ext = eeprom->magic & 0xffff;
+ ordinal = eeprom->offset >> 16;
+ attr = eeprom->offset & 0xffff;
+
+ return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+ eeprom->len);
+}
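+
+/* The eeprom->magic encoding accepted by bnxt_set_eeprom() above, as
+ * implemented: bits 31:16 select the directory entry type to write; the
+ * special type 0xffff selects a directory operation, with the opcode in
+ * bits 15:8 (0x0e = erase) and the 1-based entry index in bits 7:0, and
+ * eeprom->offset must equal the bitwise NOT of the magic as a safety
+ * check. For a write, eeprom->offset carries the ordinal in its high
+ * 16 bits and the attributes in its low 16 bits; writes to executable
+ * entry types are rejected here (the flash_device path handles those).
+ */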
+
+const struct ethtool_ops bnxt_ethtool_ops = {
+ .get_settings = bnxt_get_settings,
+ .set_settings = bnxt_set_settings,
+ .get_pauseparam = bnxt_get_pauseparam,
+ .set_pauseparam = bnxt_set_pauseparam,
+ .get_drvinfo = bnxt_get_drvinfo,
+ .get_coalesce = bnxt_get_coalesce,
+ .set_coalesce = bnxt_set_coalesce,
+ .get_msglevel = bnxt_get_msglevel,
+ .set_msglevel = bnxt_set_msglevel,
+ .get_sset_count = bnxt_get_sset_count,
+ .get_strings = bnxt_get_strings,
+ .get_ethtool_stats = bnxt_get_ethtool_stats,
+ .set_ringparam = bnxt_set_ringparam,
+ .get_ringparam = bnxt_get_ringparam,
+ .get_channels = bnxt_get_channels,
+ .set_channels = bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+ .get_rxnfc = bnxt_get_rxnfc,
+#endif
+ .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
+ .get_rxfh_key_size = bnxt_get_rxfh_key_size,
+ .get_rxfh = bnxt_get_rxfh,
+ .flash_device = bnxt_flash_device,
+ .get_eeprom_len = bnxt_get_eeprom_len,
+ .get_eeprom = bnxt_get_eeprom,
+ .set_eeprom = bnxt_set_eeprom,
+ .get_link = bnxt_get_link,
+};
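+
+/* A hedged userspace sketch (not part of this patch) of exercising the
+ * get_eeprom path above directly via SIOCETHTOOL; "eth0" is an assumed
+ * interface name, and the usual headers (<linux/ethtool.h>,
+ * <linux/sockios.h>, <net/if.h>, <sys/ioctl.h>) are required:
+ *
+ *	struct ifreq ifr = {0};
+ *	struct ethtool_eeprom *e = calloc(1, sizeof(*e) + 64);
+ *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	e->cmd = ETHTOOL_GEEPROM;
+ *	e->offset = 0;		// 0 = NVM directory, per bnxt_get_eeprom()
+ *	e->len = 64;
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)e;
+ *	if (!ioctl(fd, SIOCETHTOOL, &ifr))
+ *		printf("entries=%u entry_length=%u\n", e->data[0], e->data[1]);
+ */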
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644
index 000000000000..98fa81e08b58
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644
index 000000000000..e0aac65c6d82
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+ DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
+ DEVICE_5705_FAMILY, /* 1 - Bachelor */
+ DEVICE_SHASTA_FAMILY, /* 2 - 5751 */
+ DEVICE_5706_FAMILY, /* 3 - Teton */
+ DEVICE_5714_FAMILY, /* 4 - Hamilton */
+ DEVICE_STANFORD_FAMILY, /* 5 - 5755 */
+ DEVICE_STANFORD_ME_FAMILY, /* 6 - 5756 */
+ DEVICE_SOLEDAD_FAMILY, /* 7 - 5761[E] */
+ DEVICE_CILAI_FAMILY, /* 8 - 57780/60/90/91 */
+ DEVICE_ASPEN_FAMILY, /* 9 - 57781/85/61/65/91/95 */
+ DEVICE_ASPEN_PLUS_FAMILY, /* 10 - 57786 */
+ DEVICE_LOGAN_FAMILY, /* 11 - Any device in the Logan family
+ */
+ DEVICE_LOGAN_5762, /* 12 - Logan Enterprise (aka Columbia)
+ */
+ DEVICE_LOGAN_57767, /* 13 - Logan Client */
+ DEVICE_LOGAN_57787, /* 14 - Logan Consumer */
+ DEVICE_LOGAN_5725, /* 15 - Logan Server (TruManage-enabled)
+ */
+ DEVICE_SAWTOOTH_FAMILY, /* 16 - 5717/18 */
+ DEVICE_COTOPAXI_FAMILY, /* 17 - 5719 */
+ DEVICE_SNAGGLETOOTH_FAMILY, /* 18 - 5720 */
+ DEVICE_CUMULUS_FAMILY, /* 19 - Cumulus/Whitney */
+ MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+ CODE_ASF1, /* 0 - ASF VERSION 1.03 <deprecated> */
+ CODE_ASF2, /* 1 - ASF VERSION 2.00 <deprecated> */
+ CODE_PASSTHRU, /* 2 - PassThru <deprecated> */
+ CODE_PT_SEC, /* 3 - PassThru with security <deprecated> */
+ CODE_UMP, /* 4 - UMP <deprecated> */
+ CODE_BOOT, /* 5 - Bootcode */
+ CODE_DASH, /* 6 - TruManage (DASH + ASF + PMCI)
+ * Management firmware
+ */
+ CODE_MCTP_PASSTHRU, /* 7 - NCSI / MCTP Pass-through firmware */
+ CODE_PM_OFFLOAD, /* 8 - Power-Management Proxy Offload firmware
+ */
+ CODE_MDNS_SD_OFFLOAD, /* 9 - Multicast DNS Service Discovery Proxy
+ * Offload firmware
+ */
+ CODE_DISC_OFFLOAD, /* 10 - Discovery Offload firmware */
+ CODE_MUSTANG, /* 11 - I2C Error reporting APE firmware
+ * <deprecated>
+ */
+ CODE_ARP_BATCH, /* 12 - ARP Batch firmware */
+ CODE_SMASH, /* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+ * Management firmware
+ */
+ CODE_APE_DIAG, /* 14 - APE Test Diag firmware */
+ CODE_APE_PATCH, /* 15 - APE Patch firmware */
+ CODE_TANG_PATCH, /* 16 - TANG Patch firmware */
+ CODE_KONG_FW, /* 17 - KONG firmware */
+ CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
+ CODE_BONO_FW, /* 19 - BONO firmware */
+ CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
+
+ MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+ MEDIA_COPPER, /* 0 */
+ MEDIA_FIBER, /* 1 */
+ MEDIA_NONE, /* 2 */
+ MEDIA_COPPER_FIBER, /* 3 */
+ MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+ __le32 signature; /* contains the constant value
+ * BNXT_FIRMWARE_BIN_SIGNATURE
+ */
+ u8 flags; /* reserved for ChiMP use */
+ u8 code_type; /* enum SUPPORTED_CODE */
+ u8 device; /* enum SUPPORTED_FAMILY */
+ u8 media; /* enum SUPPORTED_MEDIA */
+ u8 version[16]; /* null-terminated version string indicating
+ * the version of the file; copied from the
+ * binary file's version string
+ */
+ u8 build;
+ u8 revision;
+ u8 minor_ver;
+ u8 major_ver;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644
index 000000000000..70fc8253c07f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ __le16 len;
+ __le32 opaque;
+ __le32 v;
+ #define EJECT_CMPL_V 0x1UL
+ __le32 unused_2;
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define HWRM_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_CMPL_TYPE_SFT 0
+ #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define HWRM_CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused_0;
+ __le32 req_buf_addr_v[2];
+ #define HWRM_FWD_REQ_CMPL_V 0x1UL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define HWRM_FWD_RESP_CMPL_V 0x1UL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+};
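+
+/* The event-specific completion records below share this 16-byte layout;
+ * they differ only in the expected event_id value and in the field masks
+ * defined for event_data1/event_data2.
+ */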
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ __le32 event_data2;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 unused_1[3];
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR 0
+#define HWRM_VERSION_MINOR 7
+#define HWRM_VERSION_UPDATE 8
+
+#define HWRM_VERSION_STR "0.7.8"
+/* The following is the signature for an HWRM message field that indicates
+ * not applicable (all F's). Cast it to the size of the field as needed.
+ */
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
+/* Input (16 bytes) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
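+
+/* By inspection of the definitions below: every hwrm_*_input structure
+ * begins with the 16-byte header fields of struct input, and every
+ * hwrm_*_output begins with the 8-byte header of struct output (see also
+ * struct hwrm_err_output), which lets generic send/receive code treat
+ * messages uniformly.
+ */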
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET (0x0UL)
+ #define HWRM_FUNC_DISABLE (0x10UL)
+ #define HWRM_FUNC_RESET (0x11UL)
+ #define HWRM_FUNC_GETFID (0x12UL)
+ #define HWRM_FUNC_VF_ALLOC (0x13UL)
+ #define HWRM_FUNC_VF_FREE (0x14UL)
+ #define HWRM_FUNC_QCAPS (0x15UL)
+ #define HWRM_FUNC_QCFG (0x16UL)
+ #define HWRM_FUNC_CFG (0x17UL)
+ #define HWRM_FUNC_QSTATS (0x18UL)
+ #define HWRM_FUNC_CLR_STATS (0x19UL)
+ #define HWRM_FUNC_DRV_UNRGTR (0x1aUL)
+ #define HWRM_FUNC_VF_RESC_FREE (0x1bUL)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY (0x1cUL)
+ #define HWRM_FUNC_DRV_RGTR (0x1dUL)
+ #define HWRM_FUNC_DRV_QVER (0x1eUL)
+ #define HWRM_FUNC_BUF_RGTR (0x1fUL)
+ #define HWRM_FUNC_VF_CFG (0x20UL)
+ #define HWRM_PORT_PHY_CFG (0x20UL)
+ #define HWRM_PORT_MAC_CFG (0x21UL)
+ #define HWRM_PORT_ENABLE (0x22UL)
+ #define HWRM_PORT_QSTATS (0x23UL)
+ #define HWRM_PORT_LPBK_QSTATS (0x24UL)
+ #define HWRM_PORT_CLR_STATS (0x25UL)
+ #define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
+ #define HWRM_PORT_PHY_QCFG (0x27UL)
+ #define HWRM_PORT_MAC_QCFG (0x28UL)
+ #define HWRM_PORT_BLINK_LED (0x29UL)
+ #define HWRM_QUEUE_QPORTCFG (0x30UL)
+ #define HWRM_QUEUE_QCFG (0x31UL)
+ #define HWRM_QUEUE_CFG (0x32UL)
+ #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
+ #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
+ #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
+ #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
+ #define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
+ #define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
+ #define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
+ #define HWRM_VNIC_ALLOC (0x40UL)
+ #define HWRM_VNIC_FREE (0x41UL)
+ #define HWRM_VNIC_CFG (0x42UL)
+ #define HWRM_VNIC_QCFG (0x43UL)
+ #define HWRM_VNIC_TPA_CFG (0x44UL)
+ #define HWRM_VNIC_TPA_QCFG (0x45UL)
+ #define HWRM_VNIC_RSS_CFG (0x46UL)
+ #define HWRM_VNIC_RSS_QCFG (0x47UL)
+ #define HWRM_VNIC_PLCMODES_CFG (0x48UL)
+ #define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_RING_ALLOC (0x50UL)
+ #define HWRM_RING_FREE (0x51UL)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (0x53UL)
+ #define HWRM_RING_RESET (0x5eUL)
+ #define HWRM_RING_GRP_ALLOC (0x60UL)
+ #define HWRM_RING_GRP_FREE (0x61UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
+ #define HWRM_ARB_GRP_ALLOC (0x80UL)
+ #define HWRM_ARB_GRP_CFG (0x81UL)
+ #define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
+ #define HWRM_CFA_L2_FILTER_FREE (0x91UL)
+ #define HWRM_CFA_L2_FILTER_CFG (0x92UL)
+ #define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
+ #define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING (0x94UL)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
+ #define HWRM_CFA_ENCAP_RECORD_FREE (0x98UL)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC (0x99UL)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE (0x9aUL)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG (0x9bUL)
+ #define HWRM_TUNNEL_DST_PORT_QUERY (0xa0UL)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC (0xa1UL)
+ #define HWRM_TUNNEL_DST_PORT_FREE (0xa2UL)
+ #define HWRM_STAT_CTX_ALLOC (0xb0UL)
+ #define HWRM_STAT_CTX_FREE (0xb1UL)
+ #define HWRM_STAT_CTX_QUERY (0xb2UL)
+ #define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
+ #define HWRM_FW_RESET (0xc0UL)
+ #define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_EXEC_FWD_RESP (0xd0UL)
+ #define HWRM_REJECT_FWD_RESP (0xd1UL)
+ #define HWRM_FWD_RESP (0xd2UL)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
+ #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_MGMT_L2_FILTER_ALLOC (0x100UL)
+ #define HWRM_MGMT_L2_FILTER_FREE (0x101UL)
+ #define HWRM_DBG_READ_DIRECT (0xff10UL)
+ #define HWRM_DBG_READ_INDIRECT (0xff11UL)
+ #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
+ #define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
+ #define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_MODIFY (0xfff4UL)
+ #define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
+ #define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
+ #define HWRM_NVM_ERASE_DIR_ENTRY (0xfff7UL)
+ #define HWRM_NVM_MOD_DIR_ENTRY (0xfff8UL)
+ #define HWRM_NVM_FIND_DIR_ENTRY (0xfff9UL)
+ #define HWRM_NVM_GET_DIR_ENTRIES (0xfffaUL)
+ #define HWRM_NVM_GET_DIR_INFO (0xfffbUL)
+ #define HWRM_NVM_RAW_DUMP (0xfffcUL)
+ #define HWRM_NVM_READ (0xfffdUL)
+ #define HWRM_NVM_WRITE (0xfffeUL)
+ #define HWRM_NVM_RAW_WRITE_BLK (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS (0x0UL)
+ #define HWRM_ERR_CODE_FAIL (0x1UL)
+ #define HWRM_ERR_CODE_INVALID_PARAMS (0x2UL)
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (0x3UL)
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (0x4UL)
+ #define HWRM_ERR_CODE_INVALID_FLAGS (0x5UL)
+ #define HWRM_ERR_CODE_INVALID_ENABLES (0x6UL)
+ #define HWRM_ERR_CODE_HWRM_ERROR (0xfUL)
+ #define HWRM_ERR_CODE_UNKNOWN_ERR (0xfffeUL)
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (0xffffUL)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 opaque_2;
+ u8 valid;
+};
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 hwrm_intf_rsvd;
+ u8 hwrm_fw_maj;
+ u8 hwrm_fw_min;
+ u8 hwrm_fw_bld;
+ u8 hwrm_fw_rsvd;
+ u8 ape_fw_maj;
+ u8 ape_fw_min;
+ u8 ape_fw_bld;
+ u8 ape_fw_rsvd;
+ u8 kong_fw_maj;
+ u8 kong_fw_min;
+ u8 kong_fw_bld;
+ u8 kong_fw_rsvd;
+ u8 tang_fw_maj;
+ u8 tang_fw_min;
+ u8 tang_fw_bld;
+ u8 tang_fw_rsvd;
+ u8 bono_fw_maj;
+ u8 bono_fw_min;
+ u8 bono_fw_bld;
+ u8 bono_fw_rsvd;
+ char hwrm_fw_name[16];
+ char ape_fw_name[16];
+ char kong_fw_name[16];
+ char tang_fw_name[16];
+ char bono_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 unused_0;
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_disable */
+/* Input (24 bytes) */
+struct hwrm_func_disable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_disable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (24 bytes) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ __le16 mtu;
+ __le16 guest_vlan;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ u8 perm_mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
+ #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
+ #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
+ #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ __le16 mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ __le32 max_bw;
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 allowed_vlan_pris;
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK (0x0UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN (0x1UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE (0x2UL << 0)
+ #define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ u8 unused_2;
+ __le16 num_mcast_filters;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_vnic_id_cnt;
+ __le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id_cnt;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 timestamp;
+ __le32 unused_2;
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID 0x1UL
+ #define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID 0x2UL
+ __le16 fid;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ u8 ver_maj;
+ u8 ver_min;
+ u8 ver_upd;
+ u8 unused_0;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (48 bytes) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ u8 unused_0;
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
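+
+/*
+ * Note: the *_LINK_SPEED values above encode the speed in units of
+ * 100 Mbps (0x1 = 100 Mb, 0xa = 1 Gb, 0x64 = 10 Gb, 0x1f4 = 50 Gb), so
+ * converting from Mbps is a plain division.  Hypothetical sketch:
+ */
+static inline u16 hwrm_speed_from_mbps_example(u32 mbps)
+{
+ return mbps / 100; /* e.g. 10000 Mbps -> 0x64 */
+}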
+
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ u8 unused_0;
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 duplex;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0)
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 duplex_setting;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL (0x1UL << 0)
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ u8 transceiver_type;
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ u8 phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ u8 unused_2;
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
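+
+/*
+ * The support/advertised speed fields are bitmaps with one bit per speed
+ * ("HD" suffixes are half duplex), unlike the scalar link_speed.  A
+ * sketch of translating them to ethtool advertising bits, assuming
+ * ADVERTISED_* from <linux/ethtool.h>; 'speeds' would be
+ * le16_to_cpu(resp->support_speeds).  Not the driver's actual mapping:
+ */
+static inline u32 hwrm_speeds_to_ethtool_example(u16 speeds)
+{
+ u32 adv = 0;
+
+ if (speeds & PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB)
+ adv |= ADVERTISED_100baseT_Full;
+ if (speeds & PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (speeds & PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB)
+ adv |= ADVERTISED_10000baseT_Full;
+ return adv;
+}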
+
+/* hwrm_port_mac_cfg */
+/* Input (32 bytes) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ __le16 port_id;
+ u8 ipg; /* inter-packet gap */
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ u8 ivlan_pri2cos_map_pri;
+ u8 lcos_map_pri;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_enable */
+/* Input (24 bytes) */
+struct hwrm_port_enable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC 0x1UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_enable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (64 bytes) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_blink_led */
+/* Input (24 bytes) */
+struct hwrm_port_blink_led_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 num_blinks;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_blink_led_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_buffers_cfg_allowed;
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 valid;
+};
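+
+/*
+ * The qportcfg response carries eight consecutive (queue_idN,
+ * queue_idN_service_profile) byte pairs, so a driver can walk them as an
+ * array rather than naming each field.  Hypothetical sketch:
+ */
+static inline u8 hwrm_qportcfg_queue_id_example(struct hwrm_queue_qportcfg_output *resp,
+ int n)
+{
+ const u8 *pairs = &resp->queue_id0; /* {id, service_profile} pairs */
+
+ return pairs[2 * n];
+}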
+
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_buffers_cfg */
+/* Input (56 bytes) */
+struct hwrm_queue_buffers_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP 0x4UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x8UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x10UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x20UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x40UL
+ #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x80UL
+ __le32 queue_id;
+ __le32 reserved;
+ __le32 shared;
+ __le32 xoff;
+ __le32 xon;
+ __le32 full;
+ __le32 notfull;
+ __le32 max;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_buffers_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED 0x80UL
+ __le16 port_id;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ __le32 enables;
+ u8 port_id;
+ u8 pri0_cos;
+ u8 pri1_cos;
+ u8 pri2_cos;
+ u8 pri3_cos;
+ u8 pri4_cos;
+ u8 pri5_cos;
+ u8 pri6_cos;
+ u8 pri7_cos;
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ __le32 queue_id0_max_bw;
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ u8 queue_id1;
+ __le32 queue_id1_min_bw;
+ __le32 queue_id1_max_bw;
+ u8 queue_id1_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id1_pri_lvl;
+ u8 queue_id1_bw_weight;
+ u8 queue_id2;
+ __le32 queue_id2_min_bw;
+ __le32 queue_id2_max_bw;
+ u8 queue_id2_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id2_pri_lvl;
+ u8 queue_id2_bw_weight;
+ u8 queue_id3;
+ __le32 queue_id3_min_bw;
+ __le32 queue_id3_max_bw;
+ u8 queue_id3_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id3_pri_lvl;
+ u8 queue_id3_bw_weight;
+ u8 queue_id4;
+ __le32 queue_id4_min_bw;
+ __le32 queue_id4_max_bw;
+ u8 queue_id4_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id4_pri_lvl;
+ u8 queue_id4_bw_weight;
+ u8 queue_id5;
+ __le32 queue_id5_min_bw;
+ __le32 queue_id5_max_bw;
+ u8 queue_id5_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id5_pri_lvl;
+ u8 queue_id5_bw_weight;
+ u8 queue_id6;
+ __le32 queue_id6_min_bw;
+ __le32 queue_id6_max_bw;
+ u8 queue_id6_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id6_pri_lvl;
+ u8 queue_id6_bw_weight;
+ u8 queue_id7;
+ __le32 queue_id7_min_bw;
+ __le32 queue_id7_max_bw;
+ u8 queue_id7_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ u8 queue_id7_pri_lvl;
+ u8 queue_id7_bw_weight;
+ u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
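+
+/*
+ * The cos2bw request repeats one block per CoS queue (id, min_bw,
+ * max_bw, tsa_assign, pri_lvl, bw_weight); a driver would typically fill
+ * the blocks in a loop and set only the ..._COS_QUEUE_IDn_VALID enable
+ * bits for the queues it actually configures.
+ */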
+
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ u8 unused_0;
+ u8 unused_1;
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
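+
+/*
+ * In hwrm_vnic_tpa_cfg, the MAX_AGG_SEGS_* and MAX_AGGS_* values encode
+ * the limit as a power-of-two exponent (0x0 = 1, 0x1 = 2, 0x2 = 4, ...),
+ * so a driver converts a desired count with ilog2() from <linux/log2.h>,
+ * e.g. (a sketch):
+ *
+ * req->max_agg_segs = cpu_to_le16(ilog2(segs));
+ */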
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ __le32 unused_0;
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ __le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
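+
+/*
+ * For hwrm_vnic_rss_cfg, hash_type selects which header tuples feed the
+ * RSS hash, ring_grp_tbl_addr/hash_key_tbl_addr point at DMA'd
+ * indirection and key tables, and rss_ctx_idx is the context id returned
+ * by hwrm_vnic_rss_cos_lb_ctx_alloc (defined below).
+ */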
+
+/* hwrm_vnic_plcmodes_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID 0x1UL
+ #define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID 0x4UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID 0x10UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le64 page_tbl_addr;
+ __le32 fbo; /* first byte offset of the ring within the first page */
+ u8 page_size;
+ u8 page_tbl_depth;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ u8 unused_4;
+ u8 unused_5;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ u8 unused_6;
+ u8 unused_7;
+ __le32 weight;
+ __le32 stat_ctx_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSI (0x1UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
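+
+/*
+ * Which hwrm_ring_alloc fields are meaningful depends on ring_type: TX
+ * and RX rings are bound to a completion ring through cmpl_ring_id,
+ * while a completion ring itself ignores it.  Minimal TX-ring sketch
+ * (hypothetical; enables and error handling omitted):
+ */
+static inline void hwrm_ring_alloc_tx_example(struct hwrm_ring_alloc_input *req,
+ u64 page_tbl, u32 len, u16 cmpl_ring_id)
+{
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ req->page_tbl_addr = cpu_to_le64(page_tbl);
+ req->length = cpu_to_le32(len);
+ req->cmpl_ring_id = cpu_to_le16(cmpl_ring_id);
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+}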
+
+/* hwrm_ring_free */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_reset */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_STATUS (0x3UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMD (0x4UL << 0)
+ u8 unused_0;
+ __le16 ring_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr; /* completion ring id */
+ __le16 rr; /* receive ring id */
+ __le16 ar; /* aggregation ring id */
+ __le16 sc; /* statistics context id */
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_arb_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 input_number;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 arb_grp_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_arb_grp_cfg */
+/* Input (32 bytes) */
+struct hwrm_arb_grp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 arb_grp_id;
+ __le16 input_number;
+ __le16 tx_ring;
+ __le32 weight;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ u8 l2_addr[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ u8 t_l2_addr[6];
+ u8 unused_4;
+ u8 unused_5;
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ u8 unused_6;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_7;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ u8 unused_8;
+ __le32 unused_9;
+ __le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID 0x1UL
+ __le64 l2_filter_id;
+ __le32 dst_vnic_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST 0x1UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
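+
+/*
+ * hwrm_cfa_l2_set_rx_mask maps naturally onto netdev RX-mode flags.
+ * Illustrative sketch, assuming IFF_* from <linux/if.h>; the caller
+ * would store the result with cpu_to_le32().  Not the driver's actual
+ * routine:
+ */
+static inline u32 hwrm_rx_mask_example(unsigned int if_flags)
+{
+ u32 mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if (if_flags & IFF_PROMISC)
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ if (if_flags & IFF_ALLMULTI)
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ return mask;
+}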
+
+/* hwrm_cfa_l2_set_bcastmcast_mirroring */
+/* Input (32 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 dflt_vnic_id;
+ __le32 mirroring_flags;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
+ __le16 vlan_id;
+ u8 bcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ u8 mcast_domain;
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
+ #define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 encap_data[16];
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 encap_record_id;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 encap_record_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ipaddr_type;
+ u8 ip_protocol;
+ __le16 dst_vnic_id;
+ __le16 mirror_vnic_id;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 pri_hint;
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
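+
+/*
+ * Note the mixed byte order above: fields matched against packet headers
+ * (ethertype, IP addresses, L4 ports) are big-endian (__be16/__be32)
+ * since they compare against on-the-wire values, while the control
+ * fields stay little-endian like the rest of the HWRM interface.
+ */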
+
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+ __le32 unused_0;
+ __le64 ntuple_filter_id;
+ __le32 new_dst_vnic_id;
+ __le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __be16 tunnel_dst_port_val;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
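+
+/*
+ * tunnel_dst_port_val is likewise a network-order UDP port; e.g. for
+ * VXLAN a driver would pass the IANA-assigned port (a sketch):
+ *
+ * req->tunnel_dst_port_val = cpu_to_be16(4789);
+ */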
+
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ u8 unused_0;
+ __le16 tunnel_dst_port_id;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_err_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_err_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_alloc */
+/* Input (56 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+ #define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ __le32 enables;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS 0x1UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN 0x2UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN 0x4UL
+ #define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID 0x8UL
+ u8 l2_address[6];
+ u8 unused_0;
+ u8 unused_1;
+ u8 l2_address_mask[6];
+ __le16 ovlan;
+ __le16 ovlan_mask;
+ __le16 ivlan;
+ __le16 ivlan_mask;
+ u8 unused_2;
+ u8 unused_3;
+ __le32 action_id;
+ u8 action_bypass;
+ #define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS 0x1UL
+ u8 unused_5[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mgmt_l2_filter_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_mgmt_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 mgmt_l2_filter_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le32 dest_addr;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_write_blk_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 offset;
+ __le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_dump_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Input (40 bytes) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_modify */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[24];
+ __le16 encap_resp_target_id;
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ __le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP (0x0UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE (0x1UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG (0x2UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO (0x3UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG (0x4UL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 000000000000..3cf3e1b70b64
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,59 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+ BNX_DIR_TYPE_UNUSED = 0,
+ BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_CHIMP_PATCH = 3,
+ BNX_DIR_TYPE_BOOTCODE = 4,
+ BNX_DIR_TYPE_VPD = 5,
+ BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+ BNX_DIR_TYPE_AVS = 7,
+ BNX_DIR_TYPE_PCIE = 8,
+ BNX_DIR_TYPE_PORT_MACRO = 9,
+ BNX_DIR_TYPE_APE_FW = 10,
+ BNX_DIR_TYPE_APE_PATCH = 11,
+ BNX_DIR_TYPE_KONG_FW = 12,
+ BNX_DIR_TYPE_KONG_PATCH = 13,
+ BNX_DIR_TYPE_BONO_FW = 14,
+ BNX_DIR_TYPE_BONO_PATCH = 15,
+ BNX_DIR_TYPE_TANG_FW = 16,
+ BNX_DIR_TYPE_TANG_PATCH = 17,
+ BNX_DIR_TYPE_BOOTCODE_2 = 18,
+ BNX_DIR_TYPE_CCM = 19,
+ BNX_DIR_TYPE_PCI_CFG = 20,
+ BNX_DIR_TYPE_TSCF_UCODE = 21,
+ BNX_DIR_TYPE_ISCSI_BOOT = 22,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+ BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+ BNX_DIR_TYPE_EXT_PHY = 27,
+ BNX_DIR_TYPE_SHARED_CFG = 40,
+ BNX_DIR_TYPE_PORT_CFG = 41,
+ BNX_DIR_TYPE_FUNC_CFG = 42,
+ BNX_DIR_TYPE_MGMT_CFG = 48,
+ BNX_DIR_TYPE_MGMT_DATA = 49,
+ BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+ BNX_DIR_TYPE_MGMT_WEB_META = 51,
+ BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+ BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST 0
+
+#define BNX_DIR_EXT_INACTIVE (1 << 0)
+#define BNX_DIR_EXT_UPDATE (1 << 1)
+
+#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+
+#endif /* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
new file mode 100644
index 000000000000..60989e7e266a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -0,0 +1,816 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
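+/* Common sanity checks for the VF ndo callbacks: the PF must be up,
+ * SRIOV must be enabled and the VF id must be within range.
+ */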
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+ if (bp->state != BNXT_STATE_OPEN) {
+ netdev_err(bp->dev, "vf ndo called though PF is down\n");
+ return -EINVAL;
+ }
+ if (!bp->pf.active_vfs) {
+ netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+ if (vf_id >= bp->pf.max_vfs) {
+ netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ bool old_setting = false;
+ u32 func_flags;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ if (vf->flags & BNXT_VF_SPOOFCHK)
+ old_setting = true;
+ if (old_setting == setting)
+ return 0;
+
+ func_flags = vf->func_flags;
+ if (setting)
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ else
+ func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ /* TODO: if the driver supports VLAN filtering on the guest VLAN,
+ * the spoof check should also include vlan anti-spoofing
+ */
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(func_flags);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->func_flags = func_flags;
+ if (setting)
+ vf->flags |= BNXT_VF_SPOOFCHK;
+ else
+ vf->flags &= ~BNXT_VF_SPOOFCHK;
+ }
+ return rc;
+}
+
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ ivi->vf = vf_id;
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+ ivi->vlan = vf->vlan;
+ ivi->qos = vf->flags & BNXT_VF_QOS;
+ ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+ if (!(vf->flags & BNXT_VF_LINK_FORCED))
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->flags & BNXT_VF_LINK_UP)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+ return 0;
+}
+
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+ /* Reject broadcast or multicast MAC addresses; a zero MAC address
+ * means the VF is allowed to use its own MAC address
+ */
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(dev, "Invalid VF ethernet address\n");
+ return -EINVAL;
+ }
+ vf = &bp->pf.vf[vf_id];
+
+ memcpy(vf->mac_addr, mac, ETH_ALEN);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u16 vlan_tag;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ /* TODO: proper handling of user priority still needs to be
+ * implemented; for now, fail the command if a non-zero priority
+ * is given
+ */
+ if (vlan_id > 4095 || qos)
+ return -EINVAL;
+
+ vf = &bp->pf.vf[vf_id];
+ vlan_tag = vlan_id;
+ if (vlan_tag == vf->vlan)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.dflt_vlan = cpu_to_le16(vlan_tag);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ vf->vlan = vlan_tag;
+ return rc;
+}
+
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ u32 pf_link_speed;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+ pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (max_tx_rate > pf_link_speed) {
+ netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
+ max_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+ if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+ return 0;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.vf_id = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+ req.max_bw = cpu_to_le32(max_tx_rate);
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req.min_bw = cpu_to_le32(min_tx_rate);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ vf->min_tx_rate = min_tx_rate;
+ vf->max_tx_rate = max_tx_rate;
+ }
+ return rc;
+}
+
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ rc = bnxt_vf_ndo_prep(bp, vf_id);
+ if (rc)
+ return rc;
+
+ vf = &bp->pf.vf[vf_id];
+
+ vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->flags |= BNXT_VF_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->flags |= BNXT_VF_LINK_FORCED;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+ break;
+ default:
+ netdev_err(bp->dev, "Invalid link option\n");
+ rc = -EINVAL;
+ break;
+ }
+ /* CHIMP TODO: send a message to the VF to update the new link state */
+
+ return rc;
+}
+
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+ int i;
+ struct bnxt_vf_info *vf;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &bp->pf.vf[i];
+ memset(vf, 0, sizeof(*vf));
+ vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct hwrm_func_vf_resc_free_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
+ req.vf_id = cpu_to_le16(i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i;
+
+ kfree(bp->pf.vf_event_bmap);
+ bp->pf.vf_event_bmap = NULL;
+
+ for (i = 0; i < 4; i++) {
+ if (bp->pf.hwrm_cmd_req_addr[i]) {
+ dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ bp->pf.hwrm_cmd_req_addr[i],
+ bp->pf.hwrm_cmd_req_dma_addr[i]);
+ bp->pf.hwrm_cmd_req_addr[i] = NULL;
+ }
+ }
+
+ kfree(bp->pf.vf);
+ bp->pf.vf = NULL;
+}
+
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+ struct pci_dev *pdev = bp->pdev;
+ u32 nr_pages, size, i, j, k = 0;
+
+ bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+ if (!bp->pf.vf)
+ return -ENOMEM;
+
+ bnxt_set_vf_attr(bp, num_vfs);
+
+ size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+ nr_pages = size / BNXT_PAGE_SIZE;
+ if (size & (BNXT_PAGE_SIZE - 1))
+ nr_pages++;
+
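+ /* Carve one BNXT_HWRM_REQ_MAX_SIZE request slot per VF out of
+ * DMA-coherent pages, BNXT_HWRM_REQS_PER_PAGE slots per page.
+ */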
+ for (i = 0; i < nr_pages; i++) {
+ bp->pf.hwrm_cmd_req_addr[i] =
+ dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+ &bp->pf.hwrm_cmd_req_dma_addr[i],
+ GFP_KERNEL);
+
+ if (!bp->pf.hwrm_cmd_req_addr[i])
+ return -ENOMEM;
+
+ for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+ struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+ vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+ j * BNXT_HWRM_REQ_MAX_SIZE;
+ vf->hwrm_cmd_req_dma_addr =
+ bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+ BNXT_HWRM_REQ_MAX_SIZE;
+ k++;
+ }
+ }
+
+ /* Max 128 VFs */
+ bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+ if (!bp->pf.vf_event_bmap)
+ return -ENOMEM;
+
+ bp->pf.hwrm_cmd_req_pages = nr_pages;
+ return 0;
+}
+
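+/* Register the VF HWRM request buffer pages with the firmware so that
+ * commands issued by the VFs can be forwarded into them.
+ */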
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+ struct hwrm_func_buf_rgtr_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+ req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+ req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+ req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+ req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+ req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+ req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+ req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Called only by the PF to reserve resources for VFs */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+{
+ u32 rc = 0, mtu, i;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+ /* Remaining rings are distributed equally among VFs for now */
+ /* TODO: the following workaround is needed to keep the total number
+ * of vf_cp_rings from exceeding the number of HW ring groups. This
+ * WA should be removed once the new HWRM provides HW ring groups
+ * capability in hwrm_func_qcap.
+ */
+ vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
+ vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+ /* TODO: restore this logic below once the WA above is removed */
+ /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
+ vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
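+ /* With aggregation rings enabled, the PF consumes two HW RX rings
+ * per RX queue, so subtract rx_nr_rings twice before dividing the
+ * remainder among the VFs.
+ */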
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
+ *num_vfs;
+ else
+ vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
+ *num_vfs;
+ vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
+
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+ FUNC_CFG_REQ_ENABLES_MRU |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+ FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+
+ mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ req.mru = cpu_to_le16(mtu);
+ req.mtu = cpu_to_le16(mtu);
+
+ req.num_rsscos_ctxs = cpu_to_le16(1);
+ req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+ req.num_l2_ctxs = cpu_to_le16(4);
+ vf_vnics = 1;
+
+ req.num_vnics = cpu_to_le16(vf_vnics);
+ /* FIXME spec currently uses 1 bit for stats ctx */
+ req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ for (i = 0; i < *num_vfs; i++) {
+ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ bp->pf.active_vfs = i + 1;
+ bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc) {
+ bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
+ else
+ bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+ }
+ return rc;
+}
+
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+ int rc = 0, vfs_supported;
+ int min_rx_rings, min_tx_rings, min_rss_ctxs;
+ int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+ /* Check if we can enable the requested number of VFs. At a minimum
+ * we require 1 RX and 1 TX ring for each VF. In this minimal
+ * configuration, features like TPA will not be available.
+ */
+ vfs_supported = *num_vfs;
+
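+ /* Step the requested VF count down until the rings and RSS
+ * contexts left over by the PF can cover one RX ring, one TX ring
+ * and one RSS context per VF.
+ */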
+ while (vfs_supported) {
+ min_rx_rings = vfs_supported;
+ min_tx_rings = vfs_supported;
+ min_rss_ctxs = vfs_supported;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+ min_rx_rings)
+ rx_ok = 1;
+ } else {
+ if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+ min_rx_rings)
+ rx_ok = 1;
+ }
+
+ if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+ tx_ok = 1;
+
+ if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+ rss_ok = 1;
+
+ if (tx_ok && rx_ok && rss_ok)
+ break;
+
+ vfs_supported--;
+ }
+
+ if (!vfs_supported) {
+ netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
+ return -EINVAL;
+ }
+
+ if (vfs_supported != *num_vfs) {
+ netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+ *num_vfs, vfs_supported);
+ *num_vfs = vfs_supported;
+ }
+
+ rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+ if (rc)
+ goto err_out1;
+
+ /* Reserve resources for VFs */
+ rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+ if (rc)
+ goto err_out2;
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ goto err_out2;
+
+ rc = pci_enable_sriov(bp->pdev, *num_vfs);
+ if (rc)
+ goto err_out2;
+
+ return 0;
+
+err_out2:
+ /* Free the resources reserved for the various VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+err_out1:
+ bnxt_free_vf_resources(bp);
+
+ return rc;
+}
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!bp->pf.active_vfs)
+ return;
+
+ pci_disable_sriov(bp->pdev);
+
+ /* Free the resources reserved for the various VFs */
+ bnxt_hwrm_func_vf_resource_free(bp);
+
+ bnxt_free_vf_resources(bp);
+
+ bp->pf.active_vfs = 0;
+ bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
+ bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+}
+
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+ netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
+ return 0;
+ }
+
+ rtnl_lock();
+ if (!netif_running(dev)) {
+ netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ rtnl_unlock();
+ return 0;
+ }
+ bp->sriov_cfg = true;
+ rtnl_unlock();
+ if (!num_vfs) {
+ bnxt_sriov_disable(bp);
+ return 0;
+ }
+
+ /* Check if the number of enabled VFs equals the requested number */
+ if (num_vfs == bp->pf.active_vfs)
+ return 0;
+
+ bnxt_sriov_enable(bp, &num_vfs);
+
+ bp->sriov_cfg = false;
+ wake_up(&bp->sriov_cfg_wait);
+
+ return num_vfs;
+}
+
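+/* Forward a prepared response to a VF: target the VF's fw_fid and hand
+ * the firmware the VF's response buffer address and completion ring.
+ */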
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ void *encap_resp, __le64 encap_resp_addr,
+ __le16 encap_resp_cpr, u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_fwd_resp_input req = {0};
+ struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ req.encap_resp_len = cpu_to_le16(msg_size);
+ req.encap_resp_addr = encap_resp_addr;
+ req.encap_resp_cmpl_ring = encap_resp_cpr;
+ memcpy(req.encap_resp, encap_resp, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+ goto fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_reject_fwd_resp_input req = {0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+ goto fwd_err_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_err_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+ u32 msg_size)
+{
+ int rc = 0;
+ struct hwrm_exec_fwd_resp_input req = {0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+ memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
+ goto exec_fwd_resp_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+exec_fwd_resp_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
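+/* Forward the VF's L2 filter request only when the PF has not assigned
+ * a MAC address, or when the VF asks for the assigned address;
+ * otherwise reject the request.
+ */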
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+ struct hwrm_cfa_l2_filter_alloc_input *req =
+ (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+ if (!is_valid_ether_addr(vf->mac_addr) ||
+ ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ else
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
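+/* When the PF does not force the VF link state, forward the PHY query
+ * to the firmware; otherwise synthesize a response from the cached PF
+ * PHY state with the link forced up or down.
+ */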
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+
+ if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+ /* real link */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+ } else {
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+ struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+ phy_qcfg_req =
+ (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+ mutex_lock(&bp->hwrm_cmd_lock);
+ memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+ sizeof(phy_qcfg_resp));
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+ if (vf->flags & BNXT_VF_LINK_UP) {
+ /* if physical link is down, force link up on VF */
+ if (phy_qcfg_resp.link ==
+ PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+ phy_qcfg_resp.link =
+ PORT_PHY_QCFG_RESP_LINK_LINK;
+ if (phy_qcfg_resp.auto_link_speed)
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.auto_link_speed;
+ else
+ phy_qcfg_resp.link_speed =
+ phy_qcfg_resp.force_link_speed;
+ phy_qcfg_resp.duplex =
+ PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+ phy_qcfg_resp.pause =
+ (PORT_PHY_QCFG_RESP_PAUSE_TX |
+ PORT_PHY_QCFG_RESP_PAUSE_RX);
+ }
+ } else {
+ /* force link down */
+ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+ phy_qcfg_resp.link_speed = 0;
+ phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+ phy_qcfg_resp.pause = 0;
+ }
+ rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+ phy_qcfg_req->resp_addr,
+ phy_qcfg_req->cmpl_ring,
+ sizeof(phy_qcfg_resp));
+ }
+ return rc;
+}
+
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+ int rc = 0;
+ struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
+ u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+
+ switch (req_type) {
+ case HWRM_CFA_L2_FILTER_ALLOC:
+ rc = bnxt_vf_validate_set_mac(bp, vf);
+ break;
+ case HWRM_FUNC_CFG:
+ /* TODO Validate if VF is allowed to change mac address,
+ * mtu, num of rings etc
+ */
+ rc = bnxt_hwrm_exec_fwd_resp(
+ bp, vf, sizeof(struct hwrm_func_cfg_input));
+ break;
+ case HWRM_PORT_PHY_QCFG:
+ rc = bnxt_vf_set_link(bp, vf);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+ u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+ /* Scan through VFs and process commands */
+ while (1) {
+ vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+ if (vf_id >= active_vfs)
+ break;
+
+ clear_bit(vf_id, bp->pf.vf_event_bmap);
+ bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+ i = vf_id + 1;
+ }
+}
+
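+/* Query the firmware for the permanent MAC address assigned to this VF
+ * and adopt it if it is valid and has changed.
+ */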
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ goto update_vf_mac_exit;
+
+ if (!is_valid_ether_addr(resp->perm_mac_address))
+ goto update_vf_mac_exit;
+
+ if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+ goto update_vf_mac_exit;
+
+ memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+#else
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+ netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
new file mode 100644
index 000000000000..c151280e3980
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+void bnxt_update_vf_mac(struct bnxt *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 17c145fdf3ff..b69dc58faeab 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -192,6 +192,7 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_CTX_WR_CMD;
io->cid_addr = cid_addr;
io->offset = off;
@@ -206,6 +207,7 @@ static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_CTXTBL_WR_CMD;
io->offset = off;
io->dma_addr = addr;
@@ -219,6 +221,7 @@ static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
struct drv_ctl_info info;
struct drv_ctl_l2_ring *ring = &info.data.ring;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
if (start)
info.cmd = DRV_CTL_START_L2_CMD;
else
@@ -236,6 +239,7 @@ static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_IO_WR_CMD;
io->offset = off;
io->data = val;
@@ -249,13 +253,14 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_IO_RD_CMD;
io->offset = off;
ethdev->drv_ctl(dev->netdev, &info);
return io->data;
}
-static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
@@ -263,6 +268,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct fcoe_capabilities *fcoe_cap =
&info.data.register_data.fcoe_features;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
@@ -272,6 +278,7 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
}
info.data.ulp_type = ulp_type;
+ info.drv_state = state;
ethdev->drv_ctl(dev->netdev, &info);
}
@@ -286,6 +293,7 @@ static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = cmd;
info.data.credit.credit_count = count;
ethdev->drv_ctl(dev->netdev, &info);
@@ -591,7 +599,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
- cnic_ulp_ctl(dev, ulp_type, true);
+ cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
return 0;
@@ -636,7 +644,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
- cnic_ulp_ctl(dev, ulp_type, false);
+ if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+ cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
+ else
+ cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
return 0;
}
@@ -4267,6 +4278,7 @@ static void cnic_delete_task(struct work_struct *work)
cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
+ memset(&info, 0, sizeof(struct drv_ctl_info));
info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
cp->ethdev->drv_ctl(dev->netdev, &info);
}
@@ -5433,6 +5445,23 @@ static void cnic_free_dev(struct cnic_dev *dev)
kfree(dev);
}
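+
+/* Fetch the FC NPIV WWPN/WWNN table from the underlying bnx2x device;
+ * valid only while the cnic device is up and on E2+ chips.
+ */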
+static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
+ struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ int ret;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2x is down */
+
+ if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+ return -EINVAL;
+
+ ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
+ return ret;
+}
+
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
struct pci_dev *pdev)
{
@@ -5451,6 +5480,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev->register_device = cnic_register_device;
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+ cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
cp = cdev->cnic_priv;
cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ef6125b0ee3e..789e5c7e9311 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -15,8 +15,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.21"
-#define CNIC_MODULE_RELDATE "January 29, 2015"
+#define CNIC_MODULE_VERSION "2.5.22"
+#define CNIC_MODULE_RELDATE "July 20, 2015"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -151,6 +151,11 @@ struct drv_ctl_register_data {
struct drv_ctl_info {
int cmd;
+ int drv_state;
+#define DRV_NOP 0
+#define DRV_ACTIVE 1
+#define DRV_INACTIVE 2
+#define DRV_UNLOADED 3
union {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
@@ -161,6 +166,15 @@ struct drv_ctl_info {
} data;
};
+#define MAX_NPIV_ENTRIES 64
+#define FC_NPIV_WWN_SIZE 8
+
+struct cnic_fc_npiv_tbl {
+ u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+ u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+ u32 count;
+};
+
struct cnic_ops {
struct module *cnic_owner;
/* Calls to these functions are protected by RCU. When
@@ -226,6 +240,8 @@ struct cnic_eth_dev {
int (*drv_submit_kwqes_16)(struct net_device *,
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+ int (*drv_get_fc_npiv_tbl)(struct net_device *,
+ struct cnic_fc_npiv_tbl *);
unsigned long reserved1[2];
union drv_info_to_mcp *addr_drv_info_to_mcp;
};
@@ -314,6 +330,7 @@ struct cnic_dev {
struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
+ int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *);
unsigned long flags;
#define CNIC_F_CNIC_UP 1
#define CNIC_F_BNX2_CLASS 3
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 64c1e9db6b0b..17f017ab4dac 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -205,6 +205,23 @@ enum dma_reg {
DMA_INDEX2RING_5,
DMA_INDEX2RING_6,
DMA_INDEX2RING_7,
+ DMA_RING0_TIMEOUT,
+ DMA_RING1_TIMEOUT,
+ DMA_RING2_TIMEOUT,
+ DMA_RING3_TIMEOUT,
+ DMA_RING4_TIMEOUT,
+ DMA_RING5_TIMEOUT,
+ DMA_RING6_TIMEOUT,
+ DMA_RING7_TIMEOUT,
+ DMA_RING8_TIMEOUT,
+ DMA_RING9_TIMEOUT,
+ DMA_RING10_TIMEOUT,
+ DMA_RING11_TIMEOUT,
+ DMA_RING12_TIMEOUT,
+ DMA_RING13_TIMEOUT,
+ DMA_RING14_TIMEOUT,
+ DMA_RING15_TIMEOUT,
+ DMA_RING16_TIMEOUT,
};
static const u8 bcmgenet_dma_regs_v3plus[] = {
@@ -216,6 +233,23 @@ static const u8 bcmgenet_dma_regs_v3plus[] = {
[DMA_PRIORITY_0] = 0x30,
[DMA_PRIORITY_1] = 0x34,
[DMA_PRIORITY_2] = 0x38,
+ [DMA_RING0_TIMEOUT] = 0x2C,
+ [DMA_RING1_TIMEOUT] = 0x30,
+ [DMA_RING2_TIMEOUT] = 0x34,
+ [DMA_RING3_TIMEOUT] = 0x38,
+ [DMA_RING4_TIMEOUT] = 0x3c,
+ [DMA_RING5_TIMEOUT] = 0x40,
+ [DMA_RING6_TIMEOUT] = 0x44,
+ [DMA_RING7_TIMEOUT] = 0x48,
+ [DMA_RING8_TIMEOUT] = 0x4c,
+ [DMA_RING9_TIMEOUT] = 0x50,
+ [DMA_RING10_TIMEOUT] = 0x54,
+ [DMA_RING11_TIMEOUT] = 0x58,
+ [DMA_RING12_TIMEOUT] = 0x5c,
+ [DMA_RING13_TIMEOUT] = 0x60,
+ [DMA_RING14_TIMEOUT] = 0x64,
+ [DMA_RING15_TIMEOUT] = 0x68,
+ [DMA_RING16_TIMEOUT] = 0x6C,
[DMA_INDEX2RING_0] = 0x70,
[DMA_INDEX2RING_1] = 0x74,
[DMA_INDEX2RING_2] = 0x78,
@@ -235,6 +269,23 @@ static const u8 bcmgenet_dma_regs_v2[] = {
[DMA_PRIORITY_0] = 0x34,
[DMA_PRIORITY_1] = 0x38,
[DMA_PRIORITY_2] = 0x3C,
+ [DMA_RING0_TIMEOUT] = 0x2C,
+ [DMA_RING1_TIMEOUT] = 0x30,
+ [DMA_RING2_TIMEOUT] = 0x34,
+ [DMA_RING3_TIMEOUT] = 0x38,
+ [DMA_RING4_TIMEOUT] = 0x3c,
+ [DMA_RING5_TIMEOUT] = 0x40,
+ [DMA_RING6_TIMEOUT] = 0x44,
+ [DMA_RING7_TIMEOUT] = 0x48,
+ [DMA_RING8_TIMEOUT] = 0x4c,
+ [DMA_RING9_TIMEOUT] = 0x50,
+ [DMA_RING10_TIMEOUT] = 0x54,
+ [DMA_RING11_TIMEOUT] = 0x58,
+ [DMA_RING12_TIMEOUT] = 0x5c,
+ [DMA_RING13_TIMEOUT] = 0x60,
+ [DMA_RING14_TIMEOUT] = 0x64,
+ [DMA_RING15_TIMEOUT] = 0x68,
+ [DMA_RING16_TIMEOUT] = 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
@@ -245,6 +296,23 @@ static const u8 bcmgenet_dma_regs_v1[] = {
[DMA_PRIORITY_0] = 0x34,
[DMA_PRIORITY_1] = 0x38,
[DMA_PRIORITY_2] = 0x3C,
+ [DMA_RING0_TIMEOUT] = 0x2C,
+ [DMA_RING1_TIMEOUT] = 0x30,
+ [DMA_RING2_TIMEOUT] = 0x34,
+ [DMA_RING3_TIMEOUT] = 0x38,
+ [DMA_RING4_TIMEOUT] = 0x3c,
+ [DMA_RING5_TIMEOUT] = 0x40,
+ [DMA_RING6_TIMEOUT] = 0x44,
+ [DMA_RING7_TIMEOUT] = 0x48,
+ [DMA_RING8_TIMEOUT] = 0x4c,
+ [DMA_RING9_TIMEOUT] = 0x50,
+ [DMA_RING10_TIMEOUT] = 0x54,
+ [DMA_RING11_TIMEOUT] = 0x58,
+ [DMA_RING12_TIMEOUT] = 0x5c,
+ [DMA_RING13_TIMEOUT] = 0x60,
+ [DMA_RING14_TIMEOUT] = 0x64,
+ [DMA_RING15_TIMEOUT] = 0x68,
+ [DMA_RING16_TIMEOUT] = 0x6C,
};
/* Set at runtime once bcmgenet version is known */
@@ -498,6 +566,85 @@ static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
priv->msg_enable = level;
}
+static int bcmgenet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ ec->tx_max_coalesced_frames =
+ bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
+ DMA_MBUF_DONE_THRESH);
+ ec->rx_max_coalesced_frames =
+ bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
+ DMA_MBUF_DONE_THRESH);
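+ /* Each timeout tick is ~8.192 us (125 MHz clock / 1024); convert
+ * ticks to microseconds via nanoseconds.
+ */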
+ ec->rx_coalesce_usecs =
+ bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+
+ return 0;
+}
+
+static int bcmgenet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+
+ /* The base system clock is 125 MHz; the DMA timeout is this
+ * reference clock divided by 1024, which yields roughly 8.192 us.
+ * Our maximum value has to fit in DMA_TIMEOUT_MASK (16 bits)
+ */
+ if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+ ec->tx_max_coalesced_frames == 0 ||
+ ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+ ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
+ return -EINVAL;
+
+ /* GENET TDMA hardware does not support a configurable timeout, but will
+ * always generate an interrupt either after MBDONE packets have been
+ * transmitted, or when the ring is empty.
+ */
+ if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
+ ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
+ return -EOPNOTSUPP;
+
+ /* Program all TX queues with the same values, as there is no
+ * ethtool knob to do coalescing on a per-queue basis
+ */
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_tdma_ring_writel(priv, i,
+ ec->tx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
+ ec->tx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+
+ for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ bcmgenet_rdma_ring_writel(priv, i,
+ ec->rx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
+ reg &= ~DMA_TIMEOUT_MASK;
+ reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+ bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
+ }
+
+ bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+ ec->rx_max_coalesced_frames,
+ DMA_MBUF_DONE_THRESH);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
+ reg &= ~DMA_TIMEOUT_MASK;
+ reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+ bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);
+
+ return 0;
+}
+
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
BCMGENET_STAT_NETDEV = -1,
@@ -646,7 +793,6 @@ static void bcmgenet_get_drvinfo(struct net_device *dev,
{
strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
strlcpy(info->version, "v2.0", sizeof(info->version));
- info->n_stats = BCMGENET_STATS_LEN;
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -844,6 +990,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
.get_eee = bcmgenet_get_eee,
.set_eee = bcmgenet_set_eee,
.nway_reset = bcmgenet_nway_reset,
+ .get_coalesce = bcmgenet_get_coalesce,
+ .set_coalesce = bcmgenet_set_coalesce,
};
/* Power down the unimac, based on mode. */
@@ -907,9 +1055,10 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-
- if (mode == GENET_POWER_PASSIVE)
+ if (mode == GENET_POWER_PASSIVE) {
+ bcmgenet_phy_power_set(priv->dev, true);
bcmgenet_mii_reset(priv->dev);
+ }
}
/* ioctl handle special commands that are not present in ethtool. */
@@ -1684,6 +1833,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
+static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
+{
+ u32 int0_enable = 0;
+
+ /* Monitor cable plug/unplug events for the internal PHY, external PHY
+ * and MoCA PHY
+ */
+ if (priv->internal_phy) {
+ int0_enable |= UMAC_IRQ_LINK_EVENT;
+ } else if (priv->ext_phy) {
+ int0_enable |= UMAC_IRQ_LINK_EVENT;
+ } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ int0_enable |= UMAC_IRQ_LINK_EVENT;
+ }
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+}
+
static int init_umac(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
@@ -1724,15 +1891,8 @@ static int init_umac(struct bcmgenet_priv *priv)
/* Enable Tx default queue 16 interrupts */
int0_enable |= UMAC_IRQ_TXDMA_DONE;
- /* Monitor cable plug/unplugged event for internal PHY */
- if (phy_is_internal(priv->phydev)) {
- int0_enable |= UMAC_IRQ_LINK_EVENT;
- } else if (priv->ext_phy) {
- int0_enable |= UMAC_IRQ_LINK_EVENT;
- } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
- int0_enable |= UMAC_IRQ_LINK_EVENT;
-
+ /* Configure backpressure vectors for MoCA */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
reg = bcmgenet_bp_mc_get(priv);
reg |= BIT(priv->hw_params->bp_in_en_shift);
@@ -2126,6 +2286,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
int ret = 0;
int timeout = 0;
u32 reg;
+ u32 dma_ctrl;
+ int i;
/* Disable TDMA to stop add more frames in TX DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2169,6 +2331,20 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
ret = -ETIMEDOUT;
}
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return ret;
}
@@ -2389,6 +2565,23 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
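+/* netpoll entry point: run both interrupt handlers with their IRQ
+ * lines masked so RX/TX completions can be polled without interrupts.
+ */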
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Invoke the main RX/TX interrupt handler */
+ disable_irq(priv->irq0);
+ bcmgenet_isr0(priv->irq0, priv);
+ enable_irq(priv->irq0);
+
+ /* And the interrupt handler for RX/TX priority queues */
+ disable_irq(priv->irq1);
+ bcmgenet_isr1(priv->irq1, priv);
+ enable_irq(priv->irq1);
+}
+#endif
+
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -2613,6 +2806,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
netif_tx_start_all_queues(dev);
+ /* Monitor link interrupts now */
+ bcmgenet_link_intr_enable(priv);
+
phy_start(priv->phydev);
}
@@ -2626,13 +2822,12 @@ static int bcmgenet_open(struct net_device *dev)
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
/* Turn on the clock */
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
/* take MAC out of reset */
@@ -2651,7 +2846,7 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
@@ -2687,23 +2882,24 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
- /* Re-configure the port multiplexer towards the PHY device */
- bcmgenet_mii_config(priv->dev, false);
-
- phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
- priv->phy_interface);
+ ret = bcmgenet_mii_probe(dev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to PHY\n");
+ goto err_irq1;
+ }
bcmgenet_netif_start(dev);
return 0;
+err_irq1:
+ free_irq(priv->irq1, priv);
err_irq0:
- free_irq(priv->irq0, dev);
+ free_irq(priv->irq0, priv);
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2757,11 +2953,10 @@ static int bcmgenet_close(struct net_device *dev)
free_irq(priv->irq0, priv);
free_irq(priv->irq1, priv);
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2820,8 +3015,6 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- bcmgenet_disable_tx_napi(priv);
-
for (q = 0; q < priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
@@ -2837,8 +3030,6 @@ static void bcmgenet_timeout(struct net_device *dev)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_enable_tx_napi(priv);
-
dev->trans_start = jiffies;
dev->stats.tx_errors++;
@@ -2941,6 +3132,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_do_ioctl = bcmgenet_ioctl,
.ndo_set_features = bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bcmgenet_poll_controller,
+#endif
};
/* Array of GENET hardware parameters/characteristics */
@@ -3125,6 +3319,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
{ },
};
+MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
@@ -3214,11 +3409,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->version = pd->genet_version;
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
- if (IS_ERR(priv->clk))
+ if (IS_ERR(priv->clk)) {
dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+ priv->clk = NULL;
+ }
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
bcmgenet_set_hw_params(priv);
@@ -3229,8 +3425,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
- if (IS_ERR(priv->clk_wol))
+ if (IS_ERR(priv->clk_wol)) {
dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+ priv->clk_wol = NULL;
+ }
priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
@@ -3256,8 +3454,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_carrier_off(dev);
/* Turn off the main clock, WOL clock is handled separately */
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
if (err)
@@ -3266,8 +3463,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
return err;
err_clk_disable:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
err:
free_netdev(dev);
return err;
@@ -3319,7 +3515,7 @@ static int bcmgenet_suspend(struct device *d)
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
clk_prepare_enable(priv->clk_wol);
- } else if (phy_is_internal(priv->phydev)) {
+ } else if (priv->internal_phy) {
ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
}
@@ -3348,7 +3544,7 @@ static int bcmgenet_resume(struct device *d)
/* If this is an internal GPHY, power it back on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
bcmgenet_umac_reset(priv);
@@ -3363,14 +3559,14 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev, false);
+ bcmgenet_mii_config(priv->dev);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
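For reference on the clock hunks above: the common clock framework treats a NULL struct clk * as "no clock", so clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) does nothing. Normalizing a failed devm_clk_get() to NULL, as the probe hunk does, is what lets the IS_ERR() checks disappear from open/close/probe. A minimal sketch of the idiom; the example_* helper name is hypothetical:

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_enet_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "enet");
	if (IS_ERR(*clk)) {
		dev_warn(dev, "failed to get enet clock\n");
		*clk = NULL;	/* clk_prepare_enable(NULL) is a no-op */
	}

	/* Safe whether or not the clock was actually found */
	return clk_prepare_enable(*clk);
}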
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 6159deab8c98..967367557309 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -304,13 +304,12 @@ struct bcmgenet_mib_counters {
#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
-#define UMAC_IRQ_RXDMA_DONE (UMAC_IRQ_RXDMA_PDONE | \
- UMAC_IRQ_RXDMA_BDONE)
+#define UMAC_IRQ_RXDMA_DONE UMAC_IRQ_RXDMA_MBDONE
#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
-#define UMAC_IRQ_TXDMA_DONE (UMAC_IRQ_TXDMA_PDONE | \
- UMAC_IRQ_TXDMA_BDONE)
+#define UMAC_IRQ_TXDMA_DONE UMAC_IRQ_TXDMA_MBDONE
+
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
@@ -386,7 +385,7 @@ struct bcmgenet_mib_counters {
#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
/* DMA interrupt threshold register */
-#define DMA_INTR_THRESHOLD_MASK 0x00FF
+#define DMA_INTR_THRESHOLD_MASK 0x01FF
/* DMA XON/XOFF register */
#define DMA_XON_THREHOLD_MASK 0xFFFF
@@ -593,6 +592,7 @@ struct bcmgenet_priv {
/* MDIO bus variables */
wait_queue_head_t wq;
struct phy_device *phydev;
+ bool internal_phy;
struct device_node *phy_dn;
struct device_node *mdio_dn;
struct mii_bus *mii_bus;
@@ -670,7 +670,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index adf23d2ac488..8bdfe53754ba 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -163,10 +163,26 @@ void bcmgenet_mii_setup(struct net_device *dev)
phy_print_status(phydev);
}
+
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+ struct fixed_phy_status *status)
+{
+ if (dev && dev->phydev && status)
+ status->link = dev->phydev->link;
+
+ return 0;
+}
+
+/* Perform a voluntary PHY software reset; the EPHY is finicky and will
+ * start corrupting packets if this reset is skipped
+ */
void bcmgenet_mii_reset(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ if (GENET_IS_V4(priv))
+ return;
+
if (priv->phydev) {
phy_init_hw(priv->phydev);
phy_start_aneg(priv->phydev);
@@ -226,9 +242,13 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
reg |= LED_ACT_SOURCE_MAC;
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+ if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ fixed_phy_set_link_update(priv->phydev,
+ bcmgenet_fixed_phy_link_update);
}
-int bcmgenet_mii_config(struct net_device *dev, bool init)
+int bcmgenet_mii_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -238,10 +258,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
u32 port_ctrl;
u32 reg;
- priv->ext_phy = !phy_is_internal(priv->phydev) &&
+ priv->ext_phy = !priv->internal_phy &&
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
priv->phy_interface = PHY_INTERFACE_MODE_NA;
switch (priv->phy_interface) {
@@ -259,7 +279,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
- if (phy_is_internal(priv->phydev)) {
+ if (priv->internal_phy) {
phy_name = "internal PHY";
bcmgenet_internal_phy_setup(dev);
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -321,13 +341,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
- if (init)
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+ dev_info_once(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
-static int bcmgenet_mii_probe(struct net_device *dev)
+int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device_node *dn = priv->pdev->dev.of_node;
@@ -345,22 +364,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
priv->old_pause = -1;
if (dn) {
- if (priv->phydev) {
- pr_info("PHY already attached\n");
- return 0;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
- ret = of_phy_register_fixed_link(dn);
- if (ret)
- return ret;
-
- priv->phy_dn = of_node_get(dn);
- }
-
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
phy_flags, priv->phy_interface);
if (!phydev) {
@@ -386,7 +389,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev, true);
+ ret = bcmgenet_mii_config(dev);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
@@ -397,14 +400,11 @@ static int bcmgenet_mii_probe(struct net_device *dev)
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs
*/
- if (phy_is_internal(priv->phydev))
+ if (priv->internal_phy)
priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
else
priv->mii_bus->irq[phydev->addr] = PHY_POLL;
- pr_info("attached PHY at address %d [%s]\n",
- phydev->addr, phydev->drv->name);
-
return 0;
}
@@ -490,7 +490,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
+ const char *phy_mode_str = NULL;
+ struct phy_device *phydev = NULL;
char *compat;
+ int phy_mode;
int ret;
compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
@@ -513,17 +516,43 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
/* Fetch the PHY phandle */
priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = of_node_get(dn);
+ }
+
/* Get the link mode */
- priv->phy_interface = of_get_phy_mode(dn);
+ phy_mode = of_get_phy_mode(dn);
+ priv->phy_interface = phy_mode;
- return 0;
-}
+ /* We need to specifically look up whether this PHY interface is internal
+ * or not *before* we even try to probe the PHY driver over MDIO as we
+ * may have shut down the internal PHY for power saving purposes.
+ */
+ if (phy_mode < 0) {
+ ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+ if (ret < 0) {
+ dev_err(kdev, "invalid PHY mode property\n");
+ return ret;
+ }
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
- struct fixed_phy_status *status)
-{
- if (dev && dev->phydev && status)
- status->link = dev->phydev->link;
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+ if (!strcasecmp(phy_mode_str, "internal"))
+ priv->internal_phy = true;
+ }
+
+ /* Make sure we initialize MoCA PHYs with a link down */
+ if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+ phydev = of_phy_find_device(dn);
+ if (phydev)
+ phydev->link = 0;
+ }
return 0;
}
@@ -574,18 +603,15 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
.asym_pause = 0,
};
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
if (!phydev || IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
}
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
- ret = fixed_phy_set_link_update(
- phydev, bcmgenet_fixed_phy_link_update);
- if (!ret)
- phydev->link = 0;
- }
+ /* Make sure we initialize MoCA PHYs with a link down */
+ phydev->link = 0;
+
}
priv->phydev = phydev;
@@ -615,10 +641,6 @@ int bcmgenet_mii_init(struct net_device *dev)
ret = bcmgenet_mii_bus_init(priv);
if (ret)
- goto out_free;
-
- ret = bcmgenet_mii_probe(dev);
- if (ret)
goto out;
return 0;
@@ -626,7 +648,6 @@ int bcmgenet_mii_init(struct net_device *dev)
out:
of_node_put(priv->phy_dn);
mdiobus_unregister(priv->mii_bus);
-out_free:
kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
return ret;
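For context on the fixed-PHY rework above: fixed_phy_register() (whose signature gained a link_gpio argument, passed here as -1 for "none") creates a software PHY polled by the fixed MDIO bus, and fixed_phy_set_link_update() installs a callback invoked on each poll so the driver can refresh the reported link state. A hedged sketch of the pattern, with hypothetical example_* names:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

static int example_link_update(struct net_device *dev,
			       struct fixed_phy_status *status)
{
	/* Mirror the MAC's notion of link into the fixed PHY */
	if (dev && dev->phydev && status)
		status->link = dev->phydev->link;

	return 0;
}

static struct phy_device *example_register_moca_phy(void)
{
	struct fixed_phy_status fphy_status = {
		.link = 0,		/* MoCA PHYs start link-down */
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (IS_ERR_OR_NULL(phydev))
		return NULL;

	fixed_phy_set_link_update(phydev, example_link_update);
	return phydev;
}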
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 73c934cf6c61..79789d8e52da 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev,
tg3_ape_scratchpad_read(tp, &temperature, attr->index,
sizeof(temperature));
spin_unlock_bh(&tp->lock);
- return sprintf(buf, "%u\n", temperature);
+ return sprintf(buf, "%u\n", temperature * 1000);
}
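The tg3 change above is a units fix: the hwmon sysfs ABI defines temp*_input in millidegrees Celsius, while the APE scratchpad reports whole degrees, hence the multiply by 1000. A hypothetical userspace reader showing the expected scaling (the hwmon path varies per system):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long mdeg;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &mdeg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("%.1f degrees C\n", mdeg / 1000.0);
	return 0;
}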
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b7a0f7879de2..9e59663a6ead 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
}
/* Flush FLI data fifo. */
-static u32
+static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
}
/* Read flash status. */
-static u32
+static int
bfa_flash_status_read(void __iomem *pci_bar)
{
union bfa_flash_dev_status_reg dev_status;
- u32 status;
+ int status;
u32 ret_status;
int i;
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
}
/* Start flash read operation. */
-static u32
+static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
char *buf)
{
- u32 status;
+ int status;
/* len must be a multiple of 4 and must not exceed the fifo size */
if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
u32 len)
{
- u32 n, status;
+ u32 n;
+ int status;
u32 off, l, s, residue, fifo_sz;
residue = len;
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 5d0753cc7e73..04b0d16b210e 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rcb->id = 0;
q0->rx_packets = q0->rx_bytes = 0;
q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
+ q0->rxbuf_map_failed = 0;
bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
@@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
: rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
+ q1->rxbuf_map_failed = 0;
bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
&hqpt_mem[i], &hsqpt_mem[i],
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e0e797f2ea14..c438d032e8bf 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -587,6 +587,7 @@ struct bna_rxq {
u64 rx_bytes;
u64 rx_packets_with_error;
u64 rxbuf_alloc_failed;
+ u64 rxbuf_map_failed;
};
/* RxQ pair */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..21a0cfc3e7ec 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
}
dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
- unmap_q->map_size, DMA_FROM_DEVICE);
+ unmap_q->map_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ put_page(page);
+ BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+ rcb->rxq->rxbuf_map_failed++;
+ goto finishing;
+ }
unmap->page = page;
unmap->page_offset = page_offset;
@@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
rcb->rxq->rxbuf_alloc_failed++;
goto finishing;
}
+
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
buff_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+ rcb->rxq->rxbuf_map_failed++;
+ goto finishing;
+ }
unmap->skb = skb;
dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
@@ -676,6 +689,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (!next_cmpl->valid)
break;
}
+ packets++;
/* TODO: BNA_CQ_EF_LOCAL ? */
if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +706,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- packets++;
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen;
ccb->bytes_per_intr += totlen;
@@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unmap = head_unmap;
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+ return NETDEV_TX_OK;
+ }
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
txqent->vector[0].length = htons(len);
dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
@@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ /* Undo the changes starting at tcb->producer_index */
+ bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
+ tcb->producer_index);
+ dev_kfree_skb_any(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+ return NETDEV_TX_OK;
+ }
+
dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
txqent->vector[vect_id].length = htons(size);
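The bnad hunks above all add the same defensive pattern: every dma_map_single()/dma_map_page()/skb_frag_dma_map() result must be validated with dma_mapping_error() before the address reaches a descriptor; on failure the buffer is released and a counter is bumped instead of posting a bogus DMA address to hardware. A minimal sketch of the pattern; the example_* name is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int example_map_rx_skb(struct device *dev, struct sk_buff *skb,
			      unsigned int len, dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		/* Drop the buffer rather than hand a bad address to HW;
		 * the caller accounts the event (e.g. rxbuf_map_failed).
		 */
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	return 0;
}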
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index faedbf24777e..f4ed816b93ee 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -175,6 +175,7 @@ struct bnad_drv_stats {
u64 tx_skb_headlen_zero;
u64 tx_skb_frag_zero;
u64 tx_skb_len_mismatch;
+ u64 tx_skb_map_failed;
u64 hw_stats_updates;
u64 netif_rx_dropped;
@@ -189,6 +190,7 @@ struct bnad_drv_stats {
u64 rx_unmap_q_alloc_failed;
u64 rxbuf_alloc_failed;
+ u64 rxbuf_map_failed;
};
/* Complete driver stats */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 2bdfc5dff4b1..0e4fdc3dd729 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_skb_headlen_zero",
"tx_skb_frag_zero",
"tx_skb_len_mismatch",
+ "tx_skb_map_failed",
"hw_stats_updates",
"netif_rx_dropped",
@@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_unmap_q_alloc_failed",
"rx_unmap_q_alloc_failed",
"rxbuf_alloc_failed",
+ "rxbuf_map_failed",
"mac_stats_clr_cnt",
"mac_frame_64",
@@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
rx_packets_with_error;
buf[bi++] = rcb->rxq->
rxbuf_alloc_failed;
+ buf[bi++] = rcb->rxq->rxbuf_map_failed;
buf[bi++] = rcb->producer_index;
buf[bi++] = rcb->consumer_index;
}
@@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
rx_packets_with_error;
buf[bi++] = rcb->rxq->
rxbuf_alloc_failed;
+ buf[bi++] = rcb->rxq->rxbuf_map_failed;
buf[bi++] = rcb->producer_index;
buf[bi++] = rcb->consumer_index;
}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index bf9eb2ecf960..88c1e1a834f8 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2774,8 +2774,7 @@ static const struct macb_config emac_config = {
static const struct macb_config zynqmp_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -2783,8 +2782,7 @@ static const struct macb_config zynqmp_config = {
};
static const struct macb_config zynq_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_NO_GIGABIT_HALF,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 1895b6b2addd..6e1faea00ca8 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -399,7 +399,7 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_JUMBO 0x00000008
+#define MACB_CAPS_JUMBO 0x00000010
/* Bit manipulation macros */
#define MACB_BIT(name) \
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..8fb84e69c30e 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -3,7 +3,7 @@
#
config NET_VENDOR_CAVIUM
- tristate "Cavium ethernet drivers"
+ bool "Cavium ethernet drivers"
depends on PCI
default y
---help---
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
depends on 64BIT
- default ARCH_THUNDER
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
@@ -29,14 +28,14 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports Thunder's NIC virtual function.
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT
- default ARCH_THUNDER
+ select PHYLIB
+ select MDIO_OCTEON
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 29f330831784..245c063ed4db 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -153,7 +153,6 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
- drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
}
static void
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 0660deecc2c9..f683d97d7614 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -818,10 +818,9 @@ static int setup_glist(struct lio *lio)
INIT_LIST_HEAD(&lio->glist);
for (i = 0; i < lio->tx_qsize; i++) {
- g = kmalloc(sizeof(*g), GFP_KERNEL);
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
if (!g)
break;
- memset(g, 0, sizeof(struct octnic_gather));
g->sg_size =
((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 8aee250904ec..d3950b20feb9 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -135,6 +135,7 @@
#define NICVF_TX_TIMEOUT (50 * HZ)
struct nicvf_cq_poll {
+ struct nicvf *nicvf;
u8 cq_idx; /* Completion queue index */
struct napi_struct napi;
};
@@ -190,10 +191,10 @@ enum tx_stats_reg_offset {
};
struct nicvf_hw_stats {
- u64 rx_bytes_ok;
- u64 rx_ucast_frames_ok;
- u64 rx_bcast_frames_ok;
- u64 rx_mcast_frames_ok;
+ u64 rx_bytes;
+ u64 rx_ucast_frames;
+ u64 rx_bcast_frames;
+ u64 rx_mcast_frames;
u64 rx_fcs_errors;
u64 rx_l2_errors;
u64 rx_drop_red;
@@ -204,6 +205,31 @@ struct nicvf_hw_stats {
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
+ u64 rx_bgx_truncated_pkts;
+ u64 rx_jabber_errs;
+ u64 rx_fcs_errs;
+ u64 rx_bgx_errs;
+ u64 rx_prel2_errs;
+ u64 rx_l2_hdr_malformed;
+ u64 rx_oversize;
+ u64 rx_undersize;
+ u64 rx_l2_len_mismatch;
+ u64 rx_l2_pclp;
+ u64 rx_ip_ver_errs;
+ u64 rx_ip_csum_errs;
+ u64 rx_ip_hdr_malformed;
+ u64 rx_ip_payload_malformed;
+ u64 rx_ip_ttl_errs;
+ u64 rx_l3_pclp;
+ u64 rx_l4_malformed;
+ u64 rx_l4_csum_errs;
+ u64 rx_udp_len_errs;
+ u64 rx_l4_port_errs;
+ u64 rx_tcp_flag_errs;
+ u64 rx_tcp_offset_errs;
+ u64 rx_l4_pclp;
+ u64 rx_truncated_pkts;
+
u64 tx_bytes_ok;
u64 tx_ucast_frames_ok;
u64 tx_bcast_frames_ok;
@@ -222,6 +248,7 @@ struct nicvf_drv_stats {
u64 rx_frames_1518;
u64 rx_frames_jumbo;
u64 rx_drops;
+
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
@@ -231,13 +258,24 @@ struct nicvf_drv_stats {
};
struct nicvf {
+ struct nicvf *pnicvf;
struct net_device *netdev;
struct pci_dev *pdev;
u8 vf_id;
u8 node;
- u8 tns_mode;
+ u8 tns_mode:1;
+ u8 sqs_mode:1;
+ u8 loopback_supported:1;
u16 mtu;
struct queue_set *qs;
+#define MAX_SQS_PER_VF_SINGLE_NODE 5
+#define MAX_SQS_PER_VF 11
+ u8 sqs_id;
+ u8 sqs_count; /* Secondary Qset count */
+ struct nicvf *snicvf[MAX_SQS_PER_VF];
+ u8 rx_queues;
+ u8 tx_queues;
+ u8 max_queues;
void __iomem *reg_base;
bool link_up;
u8 duplex;
@@ -257,7 +295,7 @@ struct nicvf {
u32 cq_coalesce_usecs;
u32 msg_enable;
- struct nicvf_hw_stats stats;
+ struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats;
struct bgx_stats bgx_stats;
struct work_struct reset_task;
@@ -269,10 +307,9 @@ struct nicvf {
char irq_name[NIC_VF_MSIX_VECTORS][20];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
- bool pf_ready_to_rcv_msg;
+ /* VF <-> PF mailbox communication */
bool pf_acked;
bool pf_nacked;
- bool bgx_stats_acked;
bool set_mac_pending;
} ____cacheline_aligned_in_smp;
@@ -304,14 +341,21 @@ struct nicvf {
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
-#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */
+#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
+#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
+#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
+#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
+#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
struct nic_cfg_msg {
u8 msg;
u8 vf_id;
- u8 tns_mode;
u8 node_id;
+ u8 tns_mode:1;
+ u8 sqs_mode:1;
+ u8 loopback_supported:1;
u8 mac_addr[ETH_ALEN];
};
@@ -319,6 +363,7 @@ struct nic_cfg_msg {
struct qs_cfg_msg {
u8 msg;
u8 num;
+ u8 sqs_count;
u64 cfg;
};
@@ -335,6 +380,7 @@ struct sq_cfg_msg {
u8 msg;
u8 qs_num;
u8 sq_num;
+ bool sqs_mode;
u64 cfg;
};
@@ -394,6 +440,28 @@ struct bgx_link_status {
u32 speed;
};
+/* Get Extra Qset IDs */
+struct sqs_alloc {
+ u8 msg;
+ u8 vf_id;
+ u8 qs_count;
+};
+
+struct nicvf_ptr {
+ u8 msg;
+ u8 vf_id;
+ bool sqs_mode;
+ u8 sqs_id;
+ u64 nicvf;
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+ u8 msg;
+ u8 vf_id;
+ bool enable;
+};
+
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
@@ -408,6 +476,9 @@ union nic_mbx {
struct rss_cfg_msg rss_cfg;
struct bgx_stats_msg bgx_stats;
struct bgx_link_status link_status;
+ struct sqs_alloc sqs_alloc;
+ struct nicvf_ptr nicvf;
+ struct set_loopback lbk;
};
#define NIC_NODE_ID_MASK 0x03
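All of the new message types above ride the same transport: each struct in union nic_mbx begins with a u8 msg tag, and the whole union must fit the 128-bit PF<->VF mailbox window. A hedged sketch of how a VF could issue the new loopback request, assuming the nic.h definitions above; the example_* name is hypothetical, and nicvf_send_msg_to_pf() blocks until the PF ACKs or NACKs:

static int example_request_loopback(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = enable;

	/* Returns non-zero if the PF never acknowledged the message */
	return nicvf_send_msg_to_pf(nic, &mbx);
}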
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6e0c03169a55..c561fdcb79a7 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -22,12 +22,16 @@
struct nicpf {
struct pci_dev *pdev;
- u8 rev_id;
u8 node;
unsigned int flags;
u8 num_vf_en; /* No of VF enabled */
bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
void __iomem *reg_base; /* Register start address */
+ u8 num_sqs_en; /* Secondary qsets enabled */
+ u64 nicvf[MAX_NUM_VFS_SUPPORTED];
+ u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
+ u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
+ bool sqs_used[MAX_NUM_VFS_SUPPORTED];
struct pkind_cfg pkind;
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
@@ -39,6 +43,7 @@ struct nicpf {
u8 duplex[MAX_LMAC];
u32 speed[MAX_LMAC];
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
u16 rss_ind_tbl_size;
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
@@ -49,6 +54,11 @@ struct nicpf {
bool irq_allocated[NIC_PF_MSIX_VECTORS];
};
+static inline bool pass1_silicon(struct nicpf *nic)
+{
+ return nic->pdev->revision < 8;
+}
+
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
@@ -112,7 +122,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
* when PF writes to MBOX(1), in next revisions when
* PF writes to MBOX(0)
*/
- if (nic->rev_id == 0) {
+ if (pass1_silicon(nic)) {
/* see the comment for nic_reg_write()/nic_reg_read()
* functions above
*/
@@ -139,14 +149,19 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf)
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
- bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-
- mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
- if (mac)
- ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+ if (vf < MAX_LMAC) {
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+ }
+ mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en);
mbx.nic_cfg.node_id = nic->node;
+
+ mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+
nic_send_msg_to_vf(nic, vf, &mbx);
}
@@ -295,9 +310,6 @@ static void nic_init_hw(struct nicpf *nic)
{
int i;
- /* Reset NIC, in case the driver is repeatedly inserted and removed */
- nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
-
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -329,6 +341,10 @@ static void nic_init_hw(struct nicpf *nic)
/* Timer config */
nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+
+ /* Enable VLAN ethertype matching and stripping */
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
+ (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
}
/* Channel parse index configuration */
@@ -381,8 +397,18 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */
/* Leave RSS_SIZE as '0' to disable RSS */
- nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
- (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+ if (pass1_silicon(nic)) {
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) |
+ (rssi_base + rssi));
+ } else {
+ /* Set MPI_ALG to '0' to disable MCAM parsing */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (padd << 16));
+ /* MPI index is same as CPI if MPI_ALG is not enabled */
+ nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (rssi_base + rssi));
+ }
if ((rssi + 1) >= cfg->rq_cnt)
continue;
@@ -395,6 +421,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
rssi = ((cpi - cpi_base) & 0x38) >> 3;
}
nic->cpi_base[cfg->vf_id] = cpi_base;
+ nic->rssi_base[cfg->vf_id] = rssi_base;
}
/* Responds to VF with its RSS indirection table size */
@@ -420,23 +447,34 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
u8 qset, idx = 0;
u64 cpi_cfg, cpi_base, rssi_base, rssi;
+ u64 idx_addr;
- cpi_base = nic->cpi_base[cfg->vf_id];
- cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
- rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+ rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
rssi = rssi_base;
qset = cfg->vf_id;
for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+ u8 svf = cfg->ind_tbl[idx] >> 3;
+
+ if (svf)
+ qset = nic->vf_sqs[cfg->vf_id][svf - 1];
+ else
+ qset = cfg->vf_id;
nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
(qset << 3) | (cfg->ind_tbl[idx] & 0x7));
idx++;
}
+ cpi_base = nic->cpi_base[cfg->vf_id];
+ if (pass1_silicon(nic))
+ idx_addr = NIC_PF_CPI_0_2047_CFG;
+ else
+ idx_addr = NIC_PF_MPI_0_2047_CFG;
+ cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
cpi_cfg &= ~(0xFULL << 20);
cpi_cfg |= (cfg->hash_bits << 20);
- nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+ nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}
/* 4 level transmit side scheduler configuration
@@ -452,19 +490,31 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
* VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
* VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
*/
-static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+ struct sq_cfg_msg *sq)
{
u32 bgx, lmac, chan;
u32 tl2, tl3, tl4;
u32 rr_quantum;
+ u8 sq_idx = sq->sq_num;
+ u8 pqs_vnic;
+
+ if (sq->sqs_mode)
+ pqs_vnic = nic->pqs_vf[vnic];
+ else
+ pqs_vnic = vnic;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
tl4 += sq_idx;
+ if (sq->sqs_mode)
+ tl4 += vnic * 8;
+
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
((u64)vnic << NIC_QS_ID_SHIFT) |
@@ -485,6 +535,86 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}
+/* Send primary nicvf pointer to secondary QS's VF */
+static void nic_send_pnicvf(struct nicpf *nic, int sqs)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+ mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
+ nic_send_msg_to_vf(nic, sqs, &mbx);
+}
+
+/* Send SQS's nicvf pointer to primary QS's VF */
+static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
+{
+ union nic_mbx mbx = {};
+ int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+ mbx.nicvf.sqs_id = nicvf->sqs_id;
+ mbx.nicvf.nicvf = nic->nicvf[sqs_id];
+ nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
+}
+
+/* Find next available Qset that can be assigned as a
+ * secondary Qset to a VF.
+ */
+static int nic_nxt_avail_sqs(struct nicpf *nic)
+{
+ int sqs;
+
+ for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
+ if (nic->sqs_used[sqs])
+ continue;
+
+ nic->sqs_used[sqs] = true;
+ return sqs + nic->num_vf_en;
+ }
+ return -1;
+}
+
+/* Allocate additional Qsets for requested VF */
+static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
+{
+ union nic_mbx mbx = {};
+ int idx, alloc_qs = 0;
+ int sqs_id;
+
+ if (!nic->num_sqs_en)
+ goto send_mbox;
+
+ for (idx = 0; idx < sqs->qs_count; idx++) {
+ sqs_id = nic_nxt_avail_sqs(nic);
+ if (sqs_id < 0)
+ break;
+ nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
+ nic->pqs_vf[sqs_id] = sqs->vf_id;
+ alloc_qs++;
+ }
+
+send_mbox:
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.vf_id = sqs->vf_id;
+ mbx.sqs_alloc.qs_count = alloc_qs;
+ nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+ int bgx_idx, lmac_idx;
+
+ if (lbk->vf_id >= MAX_LMAC)
+ return -1;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+ lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+ bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+ return 0;
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -492,6 +622,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
u64 *mbx_data;
u64 mbx_addr;
u64 reg_addr;
+ u64 cfg;
int bgx, lmac;
int i;
int ret = 0;
@@ -512,15 +643,24 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
- nic->link[vf] = 0;
- nic->duplex[vf] = 0;
- nic->speed[vf] = 0;
+ if (vf < MAX_LMAC) {
+ nic->link[vf] = 0;
+ nic->duplex[vf] = 0;
+ nic->speed[vf] = 0;
+ }
ret = 1;
break;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
- nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+ cfg = mbx.qs.cfg;
+ /* Check if it's a secondary Qset */
+ if (vf >= nic->num_vf_en) {
+ cfg = cfg & (~0x7FULL);
+ /* Assign this Qset to primary Qset's VF */
+ cfg |= nic->pqs_vf[vf];
+ }
+ nic_reg_write(nic, reg_addr, cfg);
break;
case NIC_MBOX_MSG_RQ_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
@@ -548,9 +688,11 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
(mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.sq.cfg);
- nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+ nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
break;
case NIC_MBOX_MSG_SET_MAC:
+ if (vf >= nic->num_vf_en)
+ break;
lmac = mbx.mac.vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -577,10 +719,28 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
nic->vf_enabled[vf] = false;
+ if (vf >= nic->num_vf_en)
+ nic->sqs_used[vf - nic->num_vf_en] = false;
+ nic->pqs_vf[vf] = 0;
break;
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ nic_alloc_sqs(nic, &mbx.sqs_alloc);
+ goto unlock;
+ case NIC_MBOX_MSG_NICVF_PTR:
+ nic->nicvf[vf] = mbx.nicvf.nicvf;
+ break;
+ case NIC_MBOX_MSG_PNICVF_PTR:
+ nic_send_pnicvf(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_SNICVF_PTR:
+ nic_send_snicvf(nic, &mbx.nicvf);
+ goto unlock;
case NIC_MBOX_MSG_BGX_STATS:
nic_get_bgx_stats(nic, &mbx.bgx_stats);
goto unlock;
+ case NIC_MBOX_MSG_LOOPBACK:
+ ret = nic_config_loopback(nic, &mbx.lbk);
+ break;
default:
dev_err(&nic->pdev->dev,
"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -606,8 +766,7 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx)
if (intr & (1ULL << vf)) {
dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
vf + (mbx * vf_per_mbx_reg));
- if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en)
- break;
+
nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
nic_clear_mbx_intr(nic, vf, mbx);
}
@@ -713,9 +872,24 @@ static void nic_unregister_interrupts(struct nicpf *nic)
nic_disable_msix(nic);
}
+static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
+{
+ int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
+ u16 total_vf;
+
+ /* Check if its a multi-node environment */
+ if (nr_node_ids > 1)
+ sqs_per_vf = MAX_SQS_PER_VF;
+
+ pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
+ return min(total_vf - vf_en, vf_en * sqs_per_vf);
+}
+
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
int pos = 0;
+ int vf_en;
int err;
u16 total_vf_cnt;
@@ -732,16 +906,20 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
if (!total_vf_cnt)
return 0;
- err = pci_enable_sriov(pdev, nic->num_vf_en);
+ vf_en = nic->num_vf_en;
+ nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
+ vf_en += nic->num_sqs_en;
+
+ err = pci_enable_sriov(pdev, vf_en);
if (err) {
dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
- nic->num_vf_en);
+ vf_en);
nic->num_vf_en = 0;
return err;
}
dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
- nic->num_vf_en);
+ vf_en);
nic->flags |= NIC_SRIOV_ENABLED;
return 0;
@@ -841,8 +1019,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
- pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
-
nic->node = nic_get_node_id(pdev);
nic_set_lmac_vf_mapping(nic);
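The secondary-Qset sizing added in nic_num_sqs_en() above leans on the SR-IOV capability: TotalVFs bounds how many leftover VFs can be repurposed as secondary Qsets once the per-LMAC primary VFs are accounted for. A simplified sketch of that computation; the example_* name is hypothetical:

#include <linux/kernel.h>
#include <linux/pci.h>

static int example_num_sqs(struct pci_dev *pdev, int vf_en, int sqs_per_vf)
{
	u16 total_vf = 0;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);

	/* Whatever is left after the primary VFs becomes secondary
	 * Qsets, capped at sqs_per_vf per primary VF.
	 */
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}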
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index 58197bb2f805..dd536be20193 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -85,7 +85,11 @@
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_MCAM_0_191_ENA (0x100000)
+#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
+#define NIC_PF_MCAM_CTRL (0x120000)
#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index a4228e664567..af54c10945c2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -35,10 +35,10 @@ struct nicvf_stat {
}
static const struct nicvf_stat nicvf_hw_stats[] = {
- NICVF_HW_STAT(rx_bytes_ok),
- NICVF_HW_STAT(rx_ucast_frames_ok),
- NICVF_HW_STAT(rx_bcast_frames_ok),
- NICVF_HW_STAT(rx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_bytes),
+ NICVF_HW_STAT(rx_ucast_frames),
+ NICVF_HW_STAT(rx_bcast_frames),
+ NICVF_HW_STAT(rx_mcast_frames),
NICVF_HW_STAT(rx_fcs_errors),
NICVF_HW_STAT(rx_l2_errors),
NICVF_HW_STAT(rx_drop_red),
@@ -49,6 +49,30 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_drop_mcast),
NICVF_HW_STAT(rx_drop_l3_bcast),
NICVF_HW_STAT(rx_drop_l3_mcast),
+ NICVF_HW_STAT(rx_bgx_truncated_pkts),
+ NICVF_HW_STAT(rx_jabber_errs),
+ NICVF_HW_STAT(rx_fcs_errs),
+ NICVF_HW_STAT(rx_bgx_errs),
+ NICVF_HW_STAT(rx_prel2_errs),
+ NICVF_HW_STAT(rx_l2_hdr_malformed),
+ NICVF_HW_STAT(rx_oversize),
+ NICVF_HW_STAT(rx_undersize),
+ NICVF_HW_STAT(rx_l2_len_mismatch),
+ NICVF_HW_STAT(rx_l2_pclp),
+ NICVF_HW_STAT(rx_ip_ver_errs),
+ NICVF_HW_STAT(rx_ip_csum_errs),
+ NICVF_HW_STAT(rx_ip_hdr_malformed),
+ NICVF_HW_STAT(rx_ip_payload_malformed),
+ NICVF_HW_STAT(rx_ip_ttl_errs),
+ NICVF_HW_STAT(rx_l3_pclp),
+ NICVF_HW_STAT(rx_l4_malformed),
+ NICVF_HW_STAT(rx_l4_csum_errs),
+ NICVF_HW_STAT(rx_udp_len_errs),
+ NICVF_HW_STAT(rx_l4_port_errs),
+ NICVF_HW_STAT(rx_tcp_flag_errs),
+ NICVF_HW_STAT(rx_tcp_offset_errs),
+ NICVF_HW_STAT(rx_l4_pclp),
+ NICVF_HW_STAT(rx_truncated_pkts),
NICVF_HW_STAT(tx_bytes_ok),
NICVF_HW_STAT(tx_ucast_frames_ok),
NICVF_HW_STAT(tx_bcast_frames_ok),
@@ -125,10 +149,33 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
nic->msg_enable = lvl;
}
+static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
+{
+ int stats, qidx;
+ int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+ nicvf_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(*data, "txq%d: %s", qidx + start_qidx,
+ nicvf_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
struct nicvf *nic = netdev_priv(netdev);
- int stats, qidx;
+ int stats;
+ int sqs;
if (sset != ETH_SS_STATS)
return;
@@ -143,20 +190,12 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
data += ETH_GSTRING_LEN;
}
- for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
- for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
- sprintf(data, "rxq%d: %s", qidx,
- nicvf_queue_stats[stats].name);
- data += ETH_GSTRING_LEN;
- }
- }
+ nicvf_get_qset_strings(nic, &data, 0);
- for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
- for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
- sprintf(data, "txq%d: %s", qidx,
- nicvf_queue_stats[stats].name);
- data += ETH_GSTRING_LEN;
- }
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ if (!nic->snicvf[sqs])
+ continue;
+ nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
}
for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
@@ -173,21 +212,58 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
struct nicvf *nic = netdev_priv(netdev);
+ int qstats_count;
+ int sqs;
if (sset != ETH_SS_STATS)
return -EINVAL;
+ qstats_count = nicvf_n_queue_stats *
+ (nic->qs->rq_cnt + nic->qs->sq_cnt);
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ struct nicvf *snic;
+
+ snic = nic->snicvf[sqs];
+ if (!snic)
+ continue;
+ qstats_count += nicvf_n_queue_stats *
+ (snic->qs->rq_cnt + snic->qs->sq_cnt);
+ }
+
return nicvf_n_hw_stats + nicvf_n_drv_stats +
- (nicvf_n_queue_stats *
- (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
+ qstats_count +
BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}
+static void nicvf_get_qset_stats(struct nicvf *nic,
+ struct ethtool_stats *stats, u64 **data)
+{
+ int stat, qidx;
+
+ if (!nic)
+ return;
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ nicvf_update_rq_stats(nic, qidx);
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ nicvf_update_sq_stats(nic, qidx);
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+}
+
static void nicvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct nicvf *nic = netdev_priv(netdev);
- int stat, qidx;
+ int stat;
+ int sqs;
nicvf_update_stats(nic);
@@ -195,22 +271,18 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
nicvf_update_lmac_stats(nic);
for (stat = 0; stat < nicvf_n_hw_stats; stat++)
- *(data++) = ((u64 *)&nic->stats)
+ *(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index];
for (stat = 0; stat < nicvf_n_drv_stats; stat++)
*(data++) = ((u64 *)&nic->drv_stats)
[nicvf_drv_stats[stat].index];
- for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
- for (stat = 0; stat < nicvf_n_queue_stats; stat++)
- *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
- [nicvf_queue_stats[stat].index];
- }
+ nicvf_get_qset_stats(nic, stats, &data);
- for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
- for (stat = 0; stat < nicvf_n_queue_stats; stat++)
- *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
- [nicvf_queue_stats[stat].index];
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ if (!nic->snicvf[sqs])
+ continue;
+ nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
}
for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
@@ -369,7 +441,7 @@ static int nicvf_get_rxnfc(struct net_device *dev,
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
- info->data = nic->qs->rq_cnt;
+ info->data = nic->rx_queues;
ret = 0;
break;
case ETHTOOL_GRXFH:
@@ -501,17 +573,15 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
- rss->enable = false;
- rss->hash_bits = 0;
- return -EIO;
- }
-
- /* We do not allow change in unsupported parameters */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- rss->enable = true;
+ if (!rss->enable) {
+ netdev_err(nic->netdev,
+ "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
if (indir) {
for (idx = 0; idx < rss->rss_size; idx++)
rss->ind_tbl[idx] = indir[idx];
@@ -534,11 +604,11 @@ static void nicvf_get_channels(struct net_device *dev,
memset(channel, 0, sizeof(*channel));
- channel->max_rx = MAX_RCV_QUEUES_PER_QS;
- channel->max_tx = MAX_SND_QUEUES_PER_QS;
+ channel->max_rx = nic->max_queues;
+ channel->max_tx = nic->max_queues;
- channel->rx_count = nic->qs->rq_cnt;
- channel->tx_count = nic->qs->sq_cnt;
+ channel->rx_count = nic->rx_queues;
+ channel->tx_count = nic->tx_queues;
}
/* Set no of Tx, Rx queues to be used */
@@ -548,22 +618,34 @@ static int nicvf_set_channels(struct net_device *dev,
struct nicvf *nic = netdev_priv(dev);
int err = 0;
bool if_up = netif_running(dev);
+ int cqcount;
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
- if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+ if (channel->rx_count > nic->max_queues)
return -EINVAL;
- if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+ if (channel->tx_count > nic->max_queues)
return -EINVAL;
if (if_up)
nicvf_stop(dev);
- nic->qs->rq_cnt = channel->rx_count;
- nic->qs->sq_cnt = channel->tx_count;
+ cqcount = max(channel->rx_count, channel->tx_count);
+
+ if (cqcount > MAX_CMP_QUEUES_PER_QS) {
+ nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
+
+ nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
- err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+ nic->rx_queues = channel->rx_count;
+ nic->tx_queues = channel->tx_count;
+ err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
if (err)
return err;
@@ -571,7 +653,7 @@ static int nicvf_set_channels(struct net_device *dev,
nicvf_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
- nic->qs->sq_cnt, nic->qs->rq_cnt);
+ nic->tx_queues, nic->rx_queues);
return err;
}
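A constraint worth noting in the multi-Qset stats rework above: ethtool requires get_sset_count(), get_strings() and get_ethtool_stats() to agree exactly, so all three paths must walk the same snicvf[] entries; a count that does not match the emitted names or values corrupts the ethtool -S output. A toy sketch of that contract with hypothetical example_* names:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int example_sset_count(struct net_device *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EINVAL;
	return 2;	/* must match example_strings()/example_stats() */
}

static void example_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset != ETH_SS_STATS)
		return;
	strcpy(data, "rx_ok");
	strcpy(data + ETH_GSTRING_LEN, "tx_ok");
}

static void example_stats(struct net_device *dev,
			  struct ethtool_stats *stats, u64 *data)
{
	data[0] = 0;	/* rx_ok */
	data[1] = 0;	/* tx_ok */
}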
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 3b90afb8c293..a9377727c11c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
@@ -28,7 +29,7 @@
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_NIC_VF,
- PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ PCI_VENDOR_ID_CAVIUM, 0xA134) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
PCI_VENDOR_ID_CAVIUM, 0xA11E) },
@@ -50,6 +51,14 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
+{
+ if (nic->sqs_mode)
+ return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+ else
+ return qidx;
+}
+
static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
struct sk_buff *skb)
{
@@ -105,7 +114,6 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
}
/* VF -> PF mailbox communication */
-
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
u64 *msg = (u64 *)mbx;
@@ -147,26 +155,15 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
*/
static int nicvf_check_pf_ready(struct nicvf *nic)
{
- int timeout = 5000, sleep = 20;
union nic_mbx mbx = {};
mbx.msg.msg = NIC_MBOX_MSG_READY;
-
- nic->pf_ready_to_rcv_msg = false;
-
- nicvf_write_to_mbx(nic, &mbx);
-
- while (!nic->pf_ready_to_rcv_msg) {
- msleep(sleep);
- if (nic->pf_ready_to_rcv_msg)
- break;
- timeout -= sleep;
- if (!timeout) {
- netdev_err(nic->netdev,
- "PF didn't respond to READY msg\n");
- return 0;
- }
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to READY msg\n");
+ return 0;
}
+
return 1;
}
@@ -197,13 +194,15 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
- nic->pf_ready_to_rcv_msg = true;
+ nic->pf_acked = true;
nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending)
ether_addr_copy(nic->netdev->dev_addr,
mbx.nic_cfg.mac_addr);
+ nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+ nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false;
nic->duplex = 0;
nic->speed = 0;
@@ -221,7 +220,6 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
case NIC_MBOX_MSG_BGX_STATS:
nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
nic->pf_acked = true;
- nic->bgx_stats_acked = true;
break;
case NIC_MBOX_MSG_BGX_LINK_CHANGE:
nic->pf_acked = true;
@@ -242,6 +240,26 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
netif_tx_stop_all_queues(nic->netdev);
}
break;
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ nic->sqs_count = mbx.sqs_alloc.qs_count;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_SNICVF_PTR:
+ /* Primary VF: make note of secondary VF's pointer
+ * to be used during packet transmission.
+ */
+ nic->snicvf[mbx.nicvf.sqs_id] =
+ (struct nicvf *)mbx.nicvf.nicvf;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_PNICVF_PTR:
+ /* Secondary VF/Qset: make note of primary VF's pointer
+ * to be used during packet reception, to hand packets over
+ * to primary VF's netdev.
+ */
+ nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
+ nic->pf_acked = true;
+ break;
default:
netdev_err(nic->netdev,
"Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
@@ -326,7 +344,7 @@ static int nicvf_rss_init(struct nicvf *nic)
nicvf_get_rss_size(nic);
- if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+ if (cpi_alg != CPI_ALG_NONE) {
rss->enable = false;
rss->hash_bits = 0;
return 0;
@@ -350,11 +368,100 @@ static int nicvf_rss_init(struct nicvf *nic)
for (idx = 0; idx < rss->rss_size; idx++)
rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
- nic->qs->rq_cnt);
+ nic->rx_queues);
nicvf_config_rss(nic);
return 1;
}
+/* Request PF to allocate additional Qsets */
+static void nicvf_request_sqs(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ int sqs;
+ int sqs_count = nic->sqs_count;
+ int rx_queues = 0, tx_queues = 0;
+
+ /* Only primary VF should request */
+ if (nic->sqs_mode || !nic->sqs_count)
+ return;
+
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.vf_id = nic->vf_id;
+ mbx.sqs_alloc.qs_count = nic->sqs_count;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ /* No response from PF */
+ nic->sqs_count = 0;
+ return;
+ }
+
+ /* Return if no Secondary Qsets available */
+ if (!nic->sqs_count)
+ return;
+
+ if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
+ rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
+ if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
+ tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;
+
+ /* Set the number of Rx/Tx queues in each of the secondary Qsets */
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+ mbx.nicvf.vf_id = nic->vf_id;
+ mbx.nicvf.sqs_id = sqs;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ nic->snicvf[sqs]->sqs_id = sqs;
+ if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
+ nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
+ rx_queues -= MAX_RCV_QUEUES_PER_QS;
+ } else {
+ nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
+ rx_queues = 0;
+ }
+
+ if (tx_queues > MAX_SND_QUEUES_PER_QS) {
+ nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
+ tx_queues -= MAX_SND_QUEUES_PER_QS;
+ } else {
+ nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
+ tx_queues = 0;
+ }
+
+ nic->snicvf[sqs]->qs->cq_cnt =
+ max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
+
+ /* Initialize the secondary Qset's queues and interrupts */
+ nicvf_open(nic->snicvf[sqs]->netdev);
+ }
+
+ /* Update stack with actual Rx/Tx queue count allocated */
+ if (sqs_count != nic->sqs_count)
+ nicvf_set_real_num_queues(nic->netdev,
+ nic->tx_queues, nic->rx_queues);
+}
+
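The carve-up performed by the loop above can be exercised on its own; a sketch assuming 8 Rx queues per Qset, with the mailbox and Qset plumbing stripped out:

#include <stdio.h>

#define MAX_RCV_QUEUES_PER_QS 8

/* Distribute the Rx queues exceeding the primary Qset's capacity across
 * secondary Qsets, chunked the same way nicvf_request_sqs() does.
 */
static void distribute_rxqs(int total_rxqs, int sqs_count)
{
	int remaining = total_rxqs > MAX_RCV_QUEUES_PER_QS ?
			total_rxqs - MAX_RCV_QUEUES_PER_QS : 0;
	int sqs;

	for (sqs = 0; sqs < sqs_count; sqs++) {
		int cnt = remaining > MAX_RCV_QUEUES_PER_QS ?
			  MAX_RCV_QUEUES_PER_QS : remaining;

		printf("SQS%d: %d Rx queues\n", sqs, cnt);
		remaining -= cnt;
	}
}

int main(void)
{
	distribute_rxqs(20, 2);	/* primary keeps 8; SQS0 gets 8, SQS1 gets 4 */
	return 0;
}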
+/* Send this Qset's nicvf pointer to the PF.
+ * The PF in turn sends the primary VF's nicvf struct to secondary Qsets/VFs
+ * so that packets received by these Qsets can use the primary VF's netdev.
+ */
+static void nicvf_send_vf_struct(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
+ mbx.nicvf.sqs_mode = nic->sqs_mode;
+ mbx.nicvf.nicvf = (u64)nic;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_primary_vf_struct(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
int nicvf_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues)
{
@@ -429,6 +536,34 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
}
}
+static inline void nicvf_set_rxhash(struct net_device *netdev,
+ struct cqe_rx_t *cqe_rx,
+ struct sk_buff *skb)
+{
+ u8 hash_type;
+ u32 hash;
+
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (cqe_rx->rss_alg) {
+ case RSS_ALG_TCP_IP:
+ case RSS_ALG_UDP_IP:
+ hash_type = PKT_HASH_TYPE_L4;
+ hash = cqe_rx->rss_tag;
+ break;
+ case RSS_ALG_IP:
+ hash_type = PKT_HASH_TYPE_L3;
+ hash = cqe_rx->rss_tag;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_NONE;
+ hash = 0;
+ }
+
+ skb_set_hash(skb, hash, hash_type);
+}
+
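The switch above is a pure mapping from the hardware's RSS algorithm code to a hash type; sketched standalone with stand-in enum values (the real codes live in the driver headers):

#include <stdio.h>

/* Illustrative stand-ins for the hardware RSS algorithm codes and the
 * kernel's pkt_hash_types; the real values are in the driver headers.
 */
enum rss_alg { RSS_ALG_NONE, RSS_ALG_IP, RSS_ALG_TCP_IP, RSS_ALG_UDP_IP };
enum hash_type { HASH_NONE, HASH_L3, HASH_L4 };

static enum hash_type rss_to_hash_type(enum rss_alg alg)
{
	switch (alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		return HASH_L4;	/* 4-tuple hash */
	case RSS_ALG_IP:
		return HASH_L3;	/* address-only hash */
	default:
		return HASH_NONE;
	}
}

int main(void)
{
	printf("%d\n", rss_to_hash_type(RSS_ALG_UDP_IP));	/* HASH_L4 */
	return 0;
}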
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
struct cmp_queue *cq,
@@ -437,6 +572,15 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
int err = 0;
+ int rq_idx;
+
+ rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+ if (nic->sqs_mode) {
+ /* Use primary VF's 'nicvf' struct */
+ nic = nic->pnicvf;
+ netdev = nic->netdev;
+ }
/* Check for errors */
err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
@@ -456,9 +600,17 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
skb->data, skb->len, true);
}
+ /* If error packet, drop it here */
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
nicvf_set_rx_frame_cnt(nic, skb);
- skb_record_rx_queue(skb, cqe_rx->rq_idx);
+ nicvf_set_rxhash(netdev, cqe_rx, skb);
+
+ skb_record_rx_queue(skb, rq_idx);
if (netdev->hw_features & NETIF_F_RXCSUM) {
/* HW by default verifies TCP/UDP/SCTP checksums */
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -468,6 +620,11 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
skb->protocol = eth_type_trans(skb, netdev);
+ /* Check for stripped VLAN */
+ if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ ntohs((__force __be16)cqe_rx->vlan_tci));
+
if (napi && (netdev->features & NETIF_F_GRO))
napi_gro_receive(napi, skb);
else
@@ -549,8 +706,11 @@ loop:
done:
/* Wakeup TXQ if its stopped earlier due to SQ full */
if (tx_done) {
- txq = netdev_get_tx_queue(netdev, cq_idx);
- if (netif_tx_queue_stopped(txq)) {
+ netdev = nic->pnicvf->netdev;
+ txq = netdev_get_tx_queue(netdev,
+ nicvf_netdev_qidx(nic, cq_idx));
+ nic = nic->pnicvf;
+ if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
nic->drv_stats.txq_wake++;
if (netif_msg_tx_err(nic))
@@ -624,11 +784,20 @@ static void nicvf_handle_qs_err(unsigned long data)
nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
+static void nicvf_dump_intr_status(struct nicvf *nic)
+{
+ if (netif_msg_intr(nic))
+ netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+ nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
+}
+
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
struct nicvf *nic = (struct nicvf *)nicvf_irq;
u64 intr;
+ nicvf_dump_intr_status(nic);
+
intr = nicvf_reg_read(nic, NIC_VF_INT);
/* Check for spurious interrupt */
if (!(intr & NICVF_INTR_MBOX_MASK))
@@ -639,59 +808,58 @@ static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
return IRQ_HANDLED;
}
-static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
+{
+ struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
+ struct nicvf *nic = cq_poll->nicvf;
+ int qidx = cq_poll->cq_idx;
+
+ nicvf_dump_intr_status(nic);
+
+ /* Disable interrupts */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Schedule NAPI */
+ napi_schedule(&cq_poll->napi);
+
+ /* Clear interrupt */
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
- u64 qidx, intr, clear_intr = 0;
- u64 cq_intr, rbdr_intr, qs_err_intr;
struct nicvf *nic = (struct nicvf *)nicvf_irq;
- struct queue_set *qs = nic->qs;
- struct nicvf_cq_poll *cq_poll = NULL;
+ u8 qidx;
- intr = nicvf_reg_read(nic, NIC_VF_INT);
- if (netif_msg_intr(nic))
- netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
- nic->netdev->name, intr);
-
- qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
- if (qs_err_intr) {
- /* Disable Qset err interrupt and schedule softirq */
- nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
- tasklet_hi_schedule(&nic->qs_err_task);
- clear_intr |= qs_err_intr;
- }
- /* Disable interrupts and start polling */
- cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
- for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
- if (!(cq_intr & (1 << qidx)))
- continue;
- if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+ nicvf_dump_intr_status(nic);
+
+ /* Disable RBDR interrupt and schedule softirq */
+ for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
continue;
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ tasklet_hi_schedule(&nic->rbdr_task);
+ /* Clear interrupt */
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
- nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
- clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+ return IRQ_HANDLED;
+}
- cq_poll = nic->napi[qidx];
- /* Schedule NAPI */
- if (cq_poll)
- napi_schedule(&cq_poll->napi);
- }
+static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
- /* Handle RBDR interrupts */
- rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
- if (rbdr_intr) {
- /* Disable RBDR interrupt and schedule softirq */
- for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
- if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
- continue;
- nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
- tasklet_hi_schedule(&nic->rbdr_task);
- clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
- }
- }
+ nicvf_dump_intr_status(nic);
+
+ /* Disable Qset err interrupt and schedule softirq */
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ tasklet_hi_schedule(&nic->qs_err_task);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
- /* Clear interrupts */
- nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
return IRQ_HANDLED;
}
@@ -725,7 +893,7 @@ static void nicvf_disable_msix(struct nicvf *nic)
static int nicvf_register_interrupts(struct nicvf *nic)
{
- int irq, free, ret = 0;
+ int irq, ret = 0;
int vector;
for_each_cq_irq(irq)
@@ -740,44 +908,42 @@ static int nicvf_register_interrupts(struct nicvf *nic)
sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
nic->vf_id, irq - NICVF_INTR_ID_RBDR);
- /* Register all interrupts except mailbox */
- for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+ /* Register CQ interrupts */
+ for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
vector = nic->msix_entries[irq].vector;
ret = request_irq(vector, nicvf_intr_handler,
- 0, nic->irq_name[irq], nic);
+ 0, nic->irq_name[irq], nic->napi[irq]);
if (ret)
- break;
+ goto err;
nic->irq_allocated[irq] = true;
}
- for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+ /* Register RBDR interrupt */
+ for (irq = NICVF_INTR_ID_RBDR;
+ irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
vector = nic->msix_entries[irq].vector;
- ret = request_irq(vector, nicvf_intr_handler,
+ ret = request_irq(vector, nicvf_rbdr_intr_handler,
0, nic->irq_name[irq], nic);
if (ret)
- break;
+ goto err;
nic->irq_allocated[irq] = true;
}
+ /* Register QS error interrupt */
sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
"NICVF%d Qset error", nic->vf_id);
- if (!ret) {
- vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
- irq = NICVF_INTR_ID_QS_ERR;
- ret = request_irq(vector, nicvf_intr_handler,
- 0, nic->irq_name[irq], nic);
- if (!ret)
- nic->irq_allocated[irq] = true;
- }
+ irq = NICVF_INTR_ID_QS_ERR;
+ ret = request_irq(nic->msix_entries[irq].vector,
+ nicvf_qs_err_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (!ret)
+ nic->irq_allocated[irq] = true;
- if (ret) {
- netdev_err(nic->netdev, "Request irq failed\n");
- for (free = 0; free < irq; free++)
- free_irq(nic->msix_entries[free].vector, nic);
- return ret;
- }
+err:
+ if (ret)
+ netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
- return 0;
+ return ret;
}
static void nicvf_unregister_interrupts(struct nicvf *nic)
@@ -786,8 +952,14 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
/* Free registered interrupts */
for (irq = 0; irq < nic->num_vec; irq++) {
- if (nic->irq_allocated[irq])
+ if (!nic->irq_allocated[irq])
+ continue;
+
+ if (irq < NICVF_INTR_ID_SQ)
+ free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
+ else
free_irq(nic->msix_entries[irq].vector, nic);
+
nic->irq_allocated[irq] = false;
}
@@ -852,13 +1024,26 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n",
netdev->name, qid);
-
return NETDEV_TX_BUSY;
}
return NETDEV_TX_OK;
}
+static inline void nicvf_free_cq_poll(struct nicvf *nic)
+{
+ struct nicvf_cq_poll *cq_poll;
+ int qidx;
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ nic->napi[qidx] = NULL;
+ kfree(cq_poll);
+ }
+}
+
int nicvf_stop(struct net_device *netdev)
{
int irq, qidx;
@@ -871,6 +1056,17 @@ int nicvf_stop(struct net_device *netdev)
nicvf_send_msg_to_pf(nic, &mbx);
netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+
+ /* Teardown secondary qsets first */
+ if (!nic->sqs_mode) {
+ for (qidx = 0; qidx < nic->sqs_count; qidx++) {
+ if (!nic->snicvf[qidx])
+ continue;
+ nicvf_stop(nic->snicvf[qidx]->netdev);
+ nic->snicvf[qidx] = NULL;
+ }
+ }
/* Disable RBDR & QS error interrupts */
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -893,7 +1089,6 @@ int nicvf_stop(struct net_device *netdev)
cq_poll = nic->napi[qidx];
if (!cq_poll)
continue;
- nic->napi[qidx] = NULL;
napi_synchronize(&cq_poll->napi);
/* CQ intr is enabled while napi_complete,
* so disable it now
@@ -902,7 +1097,6 @@ int nicvf_stop(struct net_device *netdev)
nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
- kfree(cq_poll);
}
netif_tx_disable(netdev);
@@ -918,6 +1112,12 @@ int nicvf_stop(struct net_device *netdev)
nicvf_unregister_interrupts(nic);
+ nicvf_free_cq_poll(nic);
+
+ /* Clear multiqset info */
+ nic->pnicvf = nic;
+ nic->sqs_count = 0;
+
return 0;
}
@@ -944,6 +1144,7 @@ int nicvf_open(struct net_device *netdev)
goto napi_del;
}
cq_poll->cq_idx = qidx;
+ cq_poll->nicvf = nic;
netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
NAPI_POLL_WEIGHT);
napi_enable(&cq_poll->napi);
@@ -972,10 +1173,16 @@ int nicvf_open(struct net_device *netdev)
 /* Configure CPI algorithm */
nic->cpi_alg = cpi_alg;
- nicvf_config_cpi(nic);
+ if (!nic->sqs_mode)
+ nicvf_config_cpi(nic);
+
+ nicvf_request_sqs(nic);
+ if (nic->sqs_mode)
+ nicvf_get_primary_vf_struct(nic);
/* Configure receive side scaling */
- nicvf_rss_init(nic);
+ if (!nic->sqs_mode)
+ nicvf_rss_init(nic);
err = nicvf_register_interrupts(nic);
if (err)
@@ -1011,6 +1218,8 @@ int nicvf_open(struct net_device *netdev)
cleanup:
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
nicvf_unregister_interrupts(nic);
+ tasklet_kill(&nic->qs_err_task);
+ tasklet_kill(&nic->rbdr_task);
napi_del:
for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
cq_poll = nic->napi[qidx];
@@ -1018,9 +1227,8 @@ napi_del:
continue;
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
- kfree(cq_poll);
- nic->napi[qidx] = NULL;
}
+ nicvf_free_cq_poll(nic);
return err;
}
@@ -1077,7 +1285,6 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
{
int stat = 0;
union nic_mbx mbx = {};
- int timeout;
if (!netif_running(nic->netdev))
return;
@@ -1087,14 +1294,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
/* Rx stats */
mbx.bgx_stats.rx = 1;
while (stat < BGX_RX_STATS_COUNT) {
- nic->bgx_stats_acked = 0;
mbx.bgx_stats.idx = stat;
- nicvf_send_msg_to_pf(nic, &mbx);
- timeout = 0;
- while ((!nic->bgx_stats_acked) && (timeout < 10)) {
- msleep(2);
- timeout++;
- }
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return;
stat++;
}
@@ -1103,14 +1305,9 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
/* Tx stats */
mbx.bgx_stats.rx = 0;
while (stat < BGX_TX_STATS_COUNT) {
- nic->bgx_stats_acked = 0;
mbx.bgx_stats.idx = stat;
- nicvf_send_msg_to_pf(nic, &mbx);
- timeout = 0;
- while ((!nic->bgx_stats_acked) && (timeout < 10)) {
- msleep(2);
- timeout++;
- }
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return;
stat++;
}
}
@@ -1118,7 +1315,7 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
void nicvf_update_stats(struct nicvf *nic)
{
int qidx;
- struct nicvf_hw_stats *stats = &nic->stats;
+ struct nicvf_hw_stats *stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
struct queue_set *qs = nic->qs;
@@ -1127,14 +1324,16 @@ void nicvf_update_stats(struct nicvf *nic)
#define GET_TX_STATS(reg) \
nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
- stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
- stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
- stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
- stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+ stats->rx_bytes = GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
stats->rx_drop_red = GET_RX_STATS(RX_RED);
+ stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
@@ -1146,9 +1345,6 @@ void nicvf_update_stats(struct nicvf *nic)
stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
stats->tx_drops = GET_TX_STATS(TX_DROP);
- drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
- stats->rx_bcast_frames_ok +
- stats->rx_mcast_frames_ok;
drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
stats->tx_bcast_frames_ok +
stats->tx_mcast_frames_ok;
@@ -1167,14 +1363,15 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct nicvf *nic = netdev_priv(netdev);
- struct nicvf_hw_stats *hw_stats = &nic->stats;
+ struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic);
- stats->rx_bytes = hw_stats->rx_bytes_ok;
+ stats->rx_bytes = hw_stats->rx_bytes;
stats->rx_packets = drv_stats->rx_frames_ok;
stats->rx_dropped = drv_stats->rx_drops;
+ stats->multicast = hw_stats->rx_mcast_frames;
stats->tx_bytes = hw_stats->tx_bytes_ok;
stats->tx_packets = drv_stats->tx_frames_ok;
@@ -1208,6 +1405,45 @@ static void nicvf_reset_task(struct work_struct *work)
nic->netdev->trans_start = jiffies;
}
+static int nicvf_config_loopback(struct nicvf *nic,
+ netdev_features_t features)
+{
+ union nic_mbx mbx = {};
+
+ mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+ mbx.lbk.vf_id = nic->vf_id;
+ mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static netdev_features_t nicvf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if ((features & NETIF_F_LOOPBACK) &&
+ netif_running(netdev) && !nic->loopback_supported)
+ features &= ~NETIF_F_LOOPBACK;
+
+ return features;
+}
+
+static int nicvf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ nicvf_config_vlan_stripping(nic, features);
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+ return nicvf_config_loopback(nic, features);
+
+ return 0;
+}
+
static const struct net_device_ops nicvf_netdev_ops = {
.ndo_open = nicvf_open,
.ndo_stop = nicvf_stop,
@@ -1216,6 +1452,8 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_set_mac_address = nicvf_set_mac_address,
.ndo_get_stats64 = nicvf_get_stats64,
.ndo_tx_timeout = nicvf_tx_timeout,
+ .ndo_fix_features = nicvf_fix_features,
+ .ndo_set_features = nicvf_set_features,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1223,8 +1461,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct net_device *netdev;
struct nicvf *nic;
- struct queue_set *qs;
- int err;
+ int err, qcount;
err = pci_enable_device(pdev);
if (err) {
@@ -1250,9 +1487,17 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
- netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
- MAX_RCV_QUEUES_PER_QS,
- MAX_SND_QUEUES_PER_QS);
+ qcount = MAX_CMP_QUEUES_PER_QS;
+
+ /* Restrict multi-Qset support to host-bound VFs only */
+ if (pdev->is_virtfn) {
+ /* Set max number of queues per VF */
+ qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
+ qcount = min(qcount,
+ (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+ }
+
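A worked example of the qcount computation above; MAX_SQS_PER_VF is assumed to be 11 here (the real value is defined in nic.h):

#include <stdio.h>

#define MAX_CMP_QUEUES_PER_QS 8
#define MAX_SQS_PER_VF 11	/* assumed; the driver defines this in nic.h */

static int roundup_int(int x, int y)
{
	return ((x + y - 1) / y) * y;	/* mirrors the kernel's roundup() */
}

int main(void)
{
	int cpus = 12;	/* example num_online_cpus() */
	int qcount = roundup_int(cpus, MAX_CMP_QUEUES_PER_QS);
	int cap = (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS;

	if (qcount > cap)
		qcount = cap;
	printf("qcount = %d\n", qcount);	/* 12 CPUs -> 16 queues */
	return 0;
}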
+ netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
if (!netdev) {
err = -ENOMEM;
goto err_release_regions;
@@ -1265,6 +1510,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic = netdev_priv(netdev);
nic->netdev = netdev;
nic->pdev = pdev;
+ nic->pnicvf = nic;
+ nic->max_queues = qcount;
/* MAP VF's configuration registers */
nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1278,20 +1525,31 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_free_netdev;
- qs = nic->qs;
-
- err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
- if (err)
- goto err_free_netdev;
-
/* Check if PF is alive and get MAC address for this VF */
err = nicvf_register_misc_interrupt(nic);
if (err)
goto err_free_netdev;
- netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_GRO);
- netdev->hw_features = netdev->features;
+ nicvf_send_vf_struct(nic);
+
+ /* Check if this VF is in QS only mode */
+ if (nic->sqs_mode)
+ return 0;
+
+ err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
+ if (err)
+ goto err_unregister_interrupts;
+
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX);
+
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->features |= netdev->hw_features;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
+
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
@@ -1326,8 +1584,13 @@ static void nicvf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nicvf *nic = netdev_priv(netdev);
+ struct net_device *pnetdev = nic->pnicvf->netdev;
- unregister_netdev(netdev);
+ /* Check if this Qset is assigned to a different VF.
+ * If so, clean up the primary and all secondary Qsets.
+ */
+ if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
+ unregister_netdev(pnetdev);
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ca4240aa6d15..e404ea837727 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -475,6 +475,27 @@ static void nicvf_reclaim_rbdr(struct nicvf *nic,
return;
}
+void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
+{
+ u64 rq_cfg;
+ int sqs;
+
+ rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
+
+ /* Enable stripping of the first (outer) VLAN tag */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ rq_cfg |= (1ULL << 25);
+ else
+ rq_cfg &= ~(1ULL << 25);
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+
+ /* Configure Secondary Qsets, if any */
+ for (sqs = 0; sqs < nic->sqs_count; sqs++)
+ if (nic->snicvf[sqs])
+ nicvf_queue_reg_write(nic->snicvf[sqs],
+ NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+}
+
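Stripped of driver plumbing, the update above is a read-modify-write of bit 25 in the RQ general config register; a sketch with the MMIO accessors stubbed out:

#include <stdbool.h>
#include <stdint.h>

static uint64_t rq_gen_cfg;	/* stand-in for NIC_QSET_RQ_GEN_CFG */

/* Toggle the "strip first VLAN" bit (bit 25) with a read-modify-write,
 * as nicvf_config_vlan_stripping() does via the queue register accessors.
 */
static void set_vlan_stripping(bool enable)
{
	uint64_t cfg = rq_gen_cfg;	/* nicvf_queue_reg_read() in the driver */

	if (enable)
		cfg |= (1ULL << 25);
	else
		cfg &= ~(1ULL << 25);
	rq_gen_cfg = cfg;		/* nicvf_queue_reg_write() in the driver */
}

int main(void)
{
	set_vlan_stripping(true);
	return rq_gen_cfg & (1ULL << 25) ? 0 : 1;
}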
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable)
@@ -524,7 +545,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx);
- nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
+ if (!nic->sqs_mode)
+ nicvf_config_vlan_stripping(nic, nic->netdev->features);
/* Enable Receive queue */
rq_cfg.ena = 1;
@@ -598,6 +621,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
mbx.sq.qs_num = qs->vnic_id;
mbx.sq.sq_num = qidx;
+ mbx.sq.sqs_mode = nic->sqs_mode;
mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -679,6 +703,7 @@ void nicvf_qset_config(struct nicvf *nic, bool enable)
/* Send a mailbox msg to PF to config Qset */
mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
mbx.qs.num = qs->vnic_id;
+ mbx.qs.sqs_count = nic->sqs_count;
mbx.qs.cfg = 0;
qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
@@ -759,6 +784,10 @@ int nicvf_set_qset_resources(struct nicvf *nic)
qs->rbdr_len = RCV_BUF_COUNT;
qs->sq_len = SND_QUEUE_LEN;
qs->cq_len = CMP_QUEUE_LEN;
+
+ nic->rx_queues = qs->rq_cnt;
+ nic->tx_queues = qs->sq_cnt;
+
return 0;
}
@@ -961,9 +990,6 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
/* Offload checksum calculation to HW */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb->protocol != htons(ETH_P_IP))
- return;
-
hdr->csum_l3 = 1; /* Enable IP csum calculation */
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
@@ -1005,7 +1031,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
* them to SQ for transfer
*/
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
- int qentry, struct sk_buff *skb)
+ int sq_num, int qentry, struct sk_buff *skb)
{
struct tso_t tso;
int seg_subdescs = 0, desc_cnt = 0;
@@ -1065,7 +1091,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Inform HW to xmit all TSO segments */
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- skb_get_queue_mapping(skb), desc_cnt);
+ sq_num, desc_cnt);
nic->drv_stats.tx_tso++;
return 1;
}
@@ -1076,10 +1102,24 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
int i, size;
int subdesc_cnt;
int sq_num, qentry;
- struct queue_set *qs = nic->qs;
+ struct queue_set *qs;
struct snd_queue *sq;
sq_num = skb_get_queue_mapping(skb);
+ if (sq_num >= MAX_SND_QUEUES_PER_QS) {
+ /* Get secondary Qset's SQ structure */
+ i = sq_num / MAX_SND_QUEUES_PER_QS;
+ if (!nic->snicvf[i - 1]) {
+ netdev_warn(nic->netdev,
+ "Secondary Qset#%d's ptr not initialized\n",
+ i - 1);
+ return 1;
+ }
+ nic = (struct nicvf *)nic->snicvf[i - 1];
+ sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
+ }
+
+ qs = nic->qs;
sq = &qs->sq[sq_num];
subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
@@ -1090,7 +1130,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Check if its a TSO packet */
if (skb_shinfo(skb)->gso_size)
- return nicvf_sq_append_tso(nic, sq, qentry, skb);
+ return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
@@ -1126,6 +1166,8 @@ doorbell:
return 1;
append_fail:
+ /* Use the primary VF's netdev for the debug log */
+ nic = nic->pnicvf;
netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
return 0;
}
@@ -1371,10 +1413,11 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
- struct cmp_queue_stats *stats = &cq->stats;
+ struct nicvf_hw_stats *stats = &nic->hw_stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
- stats->rx.errop.good++;
+ drv_stats->rx_frames_ok++;
return 0;
}
@@ -1384,111 +1427,78 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic,
nic->netdev->name,
cqe_rx->err_level, cqe_rx->err_opcode);
- switch (cqe_rx->err_level) {
- case CQ_ERRLVL_MAC:
- stats->rx.errlvl.mac_errs++;
- break;
- case CQ_ERRLVL_L2:
- stats->rx.errlvl.l2_errs++;
- break;
- case CQ_ERRLVL_L3:
- stats->rx.errlvl.l3_errs++;
- break;
- case CQ_ERRLVL_L4:
- stats->rx.errlvl.l4_errs++;
- break;
- }
-
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
- stats->rx.errop.partial_pkts++;
+ stats->rx_bgx_truncated_pkts++;
break;
case CQ_RX_ERROP_RE_JABBER:
- stats->rx.errop.jabber_errs++;
+ stats->rx_jabber_errs++;
break;
case CQ_RX_ERROP_RE_FCS:
- stats->rx.errop.fcs_errs++;
- break;
- case CQ_RX_ERROP_RE_TERMINATE:
- stats->rx.errop.terminate_errs++;
+ stats->rx_fcs_errs++;
break;
case CQ_RX_ERROP_RE_RX_CTL:
- stats->rx.errop.bgx_rx_errs++;
+ stats->rx_bgx_errs++;
break;
case CQ_RX_ERROP_PREL2_ERR:
- stats->rx.errop.prel2_errs++;
- break;
- case CQ_RX_ERROP_L2_FRAGMENT:
- stats->rx.errop.l2_frags++;
- break;
- case CQ_RX_ERROP_L2_OVERRUN:
- stats->rx.errop.l2_overruns++;
- break;
- case CQ_RX_ERROP_L2_PFCS:
- stats->rx.errop.l2_pfcs++;
- break;
- case CQ_RX_ERROP_L2_PUNY:
- stats->rx.errop.l2_puny++;
+ stats->rx_prel2_errs++;
break;
case CQ_RX_ERROP_L2_MAL:
- stats->rx.errop.l2_hdr_malformed++;
+ stats->rx_l2_hdr_malformed++;
break;
case CQ_RX_ERROP_L2_OVERSIZE:
- stats->rx.errop.l2_oversize++;
+ stats->rx_oversize++;
break;
case CQ_RX_ERROP_L2_UNDERSIZE:
- stats->rx.errop.l2_undersize++;
+ stats->rx_undersize++;
break;
case CQ_RX_ERROP_L2_LENMISM:
- stats->rx.errop.l2_len_mismatch++;
+ stats->rx_l2_len_mismatch++;
break;
case CQ_RX_ERROP_L2_PCLP:
- stats->rx.errop.l2_pclp++;
+ stats->rx_l2_pclp++;
break;
case CQ_RX_ERROP_IP_NOT:
- stats->rx.errop.non_ip++;
+ stats->rx_ip_ver_errs++;
break;
case CQ_RX_ERROP_IP_CSUM_ERR:
- stats->rx.errop.ip_csum_err++;
+ stats->rx_ip_csum_errs++;
break;
case CQ_RX_ERROP_IP_MAL:
- stats->rx.errop.ip_hdr_malformed++;
+ stats->rx_ip_hdr_malformed++;
break;
case CQ_RX_ERROP_IP_MALD:
- stats->rx.errop.ip_payload_malformed++;
+ stats->rx_ip_payload_malformed++;
break;
case CQ_RX_ERROP_IP_HOP:
- stats->rx.errop.ip_hop_errs++;
- break;
- case CQ_RX_ERROP_L3_ICRC:
- stats->rx.errop.l3_icrc_errs++;
+ stats->rx_ip_ttl_errs++;
break;
case CQ_RX_ERROP_L3_PCLP:
- stats->rx.errop.l3_pclp++;
+ stats->rx_l3_pclp++;
break;
case CQ_RX_ERROP_L4_MAL:
- stats->rx.errop.l4_malformed++;
+ stats->rx_l4_malformed++;
break;
case CQ_RX_ERROP_L4_CHK:
- stats->rx.errop.l4_csum_errs++;
+ stats->rx_l4_csum_errs++;
break;
case CQ_RX_ERROP_UDP_LEN:
- stats->rx.errop.udp_len_err++;
+ stats->rx_udp_len_errs++;
break;
case CQ_RX_ERROP_L4_PORT:
- stats->rx.errop.bad_l4_port++;
+ stats->rx_l4_port_errs++;
break;
case CQ_RX_ERROP_TCP_FLAG:
- stats->rx.errop.bad_tcp_flag++;
+ stats->rx_tcp_flag_errs++;
break;
case CQ_RX_ERROP_TCP_OFFSET:
- stats->rx.errop.tcp_offset_errs++;
+ stats->rx_tcp_offset_errs++;
break;
case CQ_RX_ERROP_L4_PCLP:
- stats->rx.errop.l4_pclp++;
+ stats->rx_l4_pclp++;
break;
case CQ_RX_ERROP_RBDR_TRUNC:
- stats->rx.errop.pkt_truncated++;
+ stats->rx_truncated_pkts++;
break;
}
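The secondary-Qset Tx handoff added to nicvf_sq_append_skb() earlier in this file reduces to splitting the netdev queue index; a standalone sketch assuming 8 send queues per Qset:

#include <stdio.h>

#define MAX_SND_QUEUES_PER_QS 8

/* Split a netdev Tx queue index into (Qset, local SQ), mirroring the
 * dispatch in nicvf_sq_append_skb(); Qset 0 is the primary VF and
 * Qset i > 0 corresponds to snicvf[i - 1].
 */
static void split_txq(int sq_num, int *qset, int *local_sq)
{
	*qset = sq_num / MAX_SND_QUEUES_PER_QS;
	*local_sq = sq_num % MAX_SND_QUEUES_PER_QS;
}

int main(void)
{
	int qset, sq;

	split_txq(19, &qset, &sq);
	printf("txq 19 -> Qset %d, SQ %d\n", qset, sq);	/* Qset 2, SQ 3 */
	return 0;
}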
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index f0937b7bfe9f..fb4957d09914 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -181,47 +181,6 @@ enum CQ_TX_ERROP_E {
};
struct cmp_queue_stats {
- struct rx_stats {
- struct {
- u64 mac_errs;
- u64 l2_errs;
- u64 l3_errs;
- u64 l4_errs;
- } errlvl;
- struct {
- u64 good;
- u64 partial_pkts;
- u64 jabber_errs;
- u64 fcs_errs;
- u64 terminate_errs;
- u64 bgx_rx_errs;
- u64 prel2_errs;
- u64 l2_frags;
- u64 l2_overruns;
- u64 l2_pfcs;
- u64 l2_puny;
- u64 l2_hdr_malformed;
- u64 l2_oversize;
- u64 l2_undersize;
- u64 l2_len_mismatch;
- u64 l2_pclp;
- u64 non_ip;
- u64 ip_csum_err;
- u64 ip_hdr_malformed;
- u64 ip_payload_malformed;
- u64 ip_hop_errs;
- u64 l3_icrc_errs;
- u64 l3_pclp;
- u64 l4_malformed;
- u64 l4_csum_errs;
- u64 udp_len_err;
- u64 bad_l4_port;
- u64 bad_tcp_flag;
- u64 tcp_offset_errs;
- u64 l4_pclp;
- u64 pkt_truncated;
- } errop;
- } rx;
struct tx_stats {
u64 good;
u64 desc_fault;
@@ -292,6 +251,7 @@ struct cmp_queue {
void *desc;
struct q_desc_mem dmem;
struct cmp_queue_stats stats;
+ int irq;
} ____cacheline_aligned_in_smp;
struct snd_queue {
@@ -347,6 +307,8 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+void nicvf_config_vlan_stripping(struct nicvf *nic,
+ netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index b961a89dc626..180aa9fabf48 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -6,6 +6,7 @@
* as published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -26,7 +27,7 @@
struct lmac {
struct bgx *bgx;
int dmac;
- unsigned char mac[ETH_ALEN];
+ u8 mac[ETH_ALEN];
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
@@ -328,6 +329,37 @@ static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
}
}
+/* Configure BGX LMAC in internal loopback mode */
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable)
+{
+ struct bgx *bgx;
+ struct lmac *lmac;
+ u64 cfg;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmac_idx];
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= PCS_MRX_CTL_LOOPBACK1;
+ else
+ cfg &= ~PCS_MRX_CTL_LOOPBACK1;
+ bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= SPU_CTL_LOOPBACK;
+ else
+ cfg &= ~SPU_CTL_LOOPBACK;
+ bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
+ }
+}
+EXPORT_SYMBOL(bgx_lmac_internal_loopback);
+
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
u64 cfg;
@@ -835,18 +867,108 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
}
}
-static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+#ifdef CONFIG_ACPI
+
+static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+ "mac-address", mac, ETH_ALEN);
+ if (ret)
+ goto out;
+
+ if (!is_valid_ether_addr(mac)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dst, mac, ETH_ALEN);
+out:
+ return ret;
+}
+
+/* Currently only sets the MAC address. */
+static acpi_status bgx_acpi_register_phy(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ struct bgx *bgx = context;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ goto out;
+
+ acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+
+ bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+out:
+ bgx->lmac_count++;
+ return AE_OK;
+}
+
+static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+ void *context, void **ret_val)
{
+ struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct bgx *bgx = context;
+ char bgx_sel[5];
+
+ snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+ pr_warn("Invalid link device\n");
+ return AE_OK;
+ }
+
+ if (strncmp(string.pointer, bgx_sel, 4))
+ return AE_OK;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ bgx_acpi_register_phy, NULL, bgx, NULL);
+
+ kfree(string.pointer);
+ return AE_CTRL_TERMINATE;
+}
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+ acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
+ return 0;
+}
+
+#else
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ struct device_node *np;
struct device_node *np_child;
u8 lmac = 0;
+ char bgx_sel[5];
+ const char *mac;
- for_each_child_of_node(np, np_child) {
- struct device_node *phy_np;
- const char *mac;
+ /* Get BGX node from DT */
+ snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+ np = of_find_node_by_name(NULL, bgx_sel);
+ if (!np)
+ return -ENODEV;
- phy_np = of_parse_phandle(np_child, "phy-handle", 0);
- if (phy_np)
- bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+ for_each_child_of_node(np, np_child) {
+ struct device_node *phy_np = of_parse_phandle(np_child,
+ "phy-handle", 0);
+ if (!phy_np)
+ continue;
+ bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
mac = of_get_mac_address(np_child);
if (mac)
@@ -855,9 +977,29 @@ static void bgx_init_of(struct bgx *bgx, struct device_node *np)
SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
lmac++;
- if (lmac == MAX_LMAC_PER_BGX)
+ if (lmac == MAX_LMAC_PER_BGX) {
+ of_node_put(np_child);
break;
+ }
}
+ return 0;
+}
+
+#else
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_OF_MDIO */
+
+static int bgx_init_phy(struct bgx *bgx)
+{
+ if (!acpi_disabled)
+ return bgx_init_acpi_phy(bgx);
+
+ return bgx_init_of_phy(bgx);
}
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -865,8 +1007,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
struct device *dev = &pdev->dev;
struct bgx *bgx = NULL;
- struct device_node *np;
- char bgx_sel[5];
u8 lmac;
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
@@ -902,10 +1042,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_vnic[bgx->bgx_id] = bgx;
bgx_get_qlm_mode(bgx);
- snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
- np = of_find_node_by_name(NULL, bgx_sel);
- if (np)
- bgx_init_of(bgx, np);
+ err = bgx_init_phy(bgx);
+ if (err)
+ goto err_enable;
bgx_init_hw(bgx);
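Both loopback bits toggled by bgx_lmac_internal_loopback() are bit 14 of their respective control registers (see the thunder_bgx.h hunk below); the selection reduces to this sketch, with the per-LMAC register I/O left to the caller:

#include <stdbool.h>
#include <stdint.h>

#define PCS_MRX_CTL_LOOPBACK1	(1ULL << 14)	/* SGMII: BGX_GMP_PCS_MRX_CTL */
#define SPU_CTL_LOOPBACK	(1ULL << 14)	/* XAUI+: BGX_SPUX_CONTROL1   */

/* Compute the new control-register value; the driver reads, modifies and
 * writes back via bgx_reg_read()/bgx_reg_write() on the selected LMAC.
 */
static uint64_t loopback_cfg(uint64_t cfg, bool is_sgmii, bool enable)
{
	uint64_t bit = is_sgmii ? PCS_MRX_CTL_LOOPBACK1 : SPU_CTL_LOOPBACK;

	return enable ? (cfg | bit) : (cfg & ~bit);
}

int main(void)
{
	return loopback_cfg(0, true, true) == PCS_MRX_CTL_LOOPBACK1 ? 0 : 1;
}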
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index ba4f53b7cc2c..07b7ec66c60d 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -72,6 +72,7 @@
#define BGX_SPUX_CONTROL1 0x10000
#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_LOOPBACK BIT_ULL(14)
#define SPU_CTL_RESET BIT_ULL(15)
#define BGX_SPUX_STATUS1 0x10008
#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
@@ -126,6 +127,7 @@
#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
@@ -186,6 +188,8 @@ int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable);
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 629f75d70353..414fe7c487d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -47,6 +47,7 @@
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
@@ -478,6 +479,8 @@ struct port_info {
#ifdef CONFIG_CHELSIO_T4_FCOE
struct cxgb_fcoe fcoe;
#endif /* CONFIG_CHELSIO_T4_FCOE */
+ bool rxtstamp; /* Enable TS */
+ struct hwtstamp_config tstamp_config;
};
struct dentry;
@@ -517,6 +520,7 @@ struct sge_fl { /* SGE free-buffer queue state */
/* A packet gather list */
struct pkt_gl {
+ u64 sgetstamp; /* SGE Time Stamp for Ingress Packet */
struct page_frag frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
@@ -767,6 +771,11 @@ struct adapter {
bool tid_release_task_busy;
struct dentry *debugfs_root;
+ bool use_bd; /* Use SGE back-door interface to read SGE contexts */
+ bool trace_rss; /* 1 means a different RSS flit is used for each
+ * filter; 0 means the default RSS flit is
+ * used for all 4 filters.
+ */
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
@@ -1284,6 +1293,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_check_fw_version(struct adapter *adap);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
@@ -1440,6 +1450,10 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+ int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+ int filter_index, int *enabled);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6074680bc985..052c660aca80 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = {
"Auto Negotiated"
};
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+ if (state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ state == CXGB4_DCB_STATE_HOST)
+ return true;
+ else
+ return false;
+}
+
/* Initialize a port's Data Center Bridging state. Typically used after a
* Link Down event.
*/
@@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;
- if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ if (!cxgb4_dcb_state_synced(dcb->state) ||
priority >= CXGB4_MAX_PRIORITY)
*pfccfg = 0;
else
@@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
struct adapter *adap = pi->adapter;
int err;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+ if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
priority >= CXGB4_MAX_PRIORITY)
return;
@@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev)
{
struct port_info *pi = netdev2pinfo(dev);
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return false;
return pi->dcb.pfcen != 0;
@@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
struct adapter *adap = pi->adapter;
int i;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 0;
for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
*/
static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
{
- return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+ /* Convert app_idtype to firmware format before querying */
+ return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+ app_idtype : 3, app_id, 0);
}
/* Write a new Application User Priority Map for the specified Application ID
@@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
int i, err;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return -EINVAL;
/* DCB info gets thrown away on link up */
@@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;
- if (dcb_subtype && !(dcb->msgs & dcb_subtype))
- return 0;
+ if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+ return 0;
- return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+ return (cxgb4_dcb_state_synced(dcb->state) &&
(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
}
@@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
/* Can't enable DCB if we haven't successfully negotiated it.
*/
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
/* There's currently no mechanism to allow for the firmware DCBX
@@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev,
struct adapter *adap = pi->adapter;
int i, err = 0;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
info->willing = 0;
@@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
struct adapter *adap = pi->adapter;
int i, err = 0;
- if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+ if (!cxgb4_dcb_state_synced(pi->dcb.state))
return 1;
for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
if (!pcmd.u.dcb.app_priority.protocolid)
break;
- table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+ table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
table[i].protocol =
be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
table[i].priority =
@@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+ pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
return 0;
}
@@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
*/
pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
+ pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index a11485fbb33f..4269944c5db5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
return 0;
}
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Inst Data PC LS0Stat "
+ "LS0Addr LS0Data LS1Stat LS1Addr LS1Data\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+ (p[9] >> 16) & 0xff, /* Status */
+ p[9] & 0xffff, p[8] >> 16, /* Inst */
+ p[8] & 0xffff, p[7] >> 16, /* Data */
+ p[7] & 0xffff, p[6] >> 16, /* PC */
+ p[2], p[1], p[0], /* LS0 Stat, Addr and Data */
+ p[5], p[4], p[3]); /* LS1 Stat, Addr and Data */
+ }
+ return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Status Inst Data PC\n");
+ } else {
+ const u32 *p = v;
+
+ seq_printf(seq, " %02x %08x %08x %08x\n",
+ p[3] & 0xff, p[2], p[1], p[0]);
+ seq_printf(seq, " %02x %02x%06x %02x%06x %02x%06x\n",
+ (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+ p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+ seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x\n",
+ (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+ p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+ p[6] >> 16);
+ }
+ return 0;
+}
+
static int cim_la_open(struct inode *inode, struct file *file)
{
int ret;
@@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file)
if (ret)
return ret;
- p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
- cfg & UPDBGLACAPTPCONLY_F ?
- cim_la_show_3in1 : cim_la_show);
+ if (is_t6(adap->params.chip)) {
+ /* +1 to account for integer division of CIMLA_SIZE/10 */
+ p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+ 10 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ?
+ cim_la_show_pc_t6 : cim_la_show_t6);
+ } else {
+ p = seq_open_tab(file, adap->params.cim_la_size / 8,
+ 8 * sizeof(u32), 1,
+ cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+ cim_la_show);
+ }
if (!p)
return -ENOMEM;
@@ -298,11 +346,11 @@ static int cim_qcfg_show(struct seq_file *seq, void *v)
if (is_t4(adap->params.chip)) {
i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
- wr = obq_wr_t4;
+ wr = obq_wr_t4;
} else {
i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
- wr = obq_wr_t5;
+ wr = obq_wr_t5;
}
}
if (i)
@@ -892,6 +940,7 @@ static const char * const devlog_level_strings[] = {
static const char * const devlog_facility_strings[] = {
[FW_DEVLOG_FACILITY_CORE] = "CORE",
+ [FW_DEVLOG_FACILITY_CF] = "CF",
[FW_DEVLOG_FACILITY_SCHED] = "SCHED",
[FW_DEVLOG_FACILITY_TIMER] = "TIMER",
[FW_DEVLOG_FACILITY_RES] = "RES",
@@ -1080,18 +1129,26 @@ static const struct file_operations devlog_fops = {
static int mbox_show(struct seq_file *seq, void *v)
{
static const char * const owner[] = { "none", "FW", "driver",
- "unknown" };
+ "unknown", "<unread>" };
int i;
unsigned int mbox = (uintptr_t)seq->private & 7;
struct adapter *adap = seq->private - mbox;
void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
- unsigned int ctrl_reg = (is_t4(adap->params.chip)
- ? CIM_PF_MAILBOX_CTRL_A
- : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
- void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
- i = MBOWNER_G(readl(ctrl));
+ /* For T4 we don't have a shadow copy of the Mailbox Control register.
+ * And since reading that real register causes a side effect of
+ * granting ownership, we're best off simply not reading it at all.
+ */
+ if (is_t4(adap->params.chip)) {
+ i = 4; /* index of "<unread>" */
+ } else {
+ unsigned int ctrl_reg = CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A;
+ void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
+
+ i = MBOWNER_G(readl(ctrl));
+ }
+
seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
for (i = 0; i < MBOX_LEN; i += 8)
@@ -1153,6 +1210,299 @@ static const struct file_operations mbox_debugfs_fops = {
.write = mbox_write
};
+static int mps_trc_show(struct seq_file *seq, void *v)
+{
+ int enabled, i;
+ struct trace_params tp;
+ unsigned int trcidx = (uintptr_t)seq->private & 3;
+ struct adapter *adap = seq->private - trcidx;
+
+ t4_get_trace_filter(adap, &tp, trcidx, &enabled);
+ if (!enabled) {
+ seq_puts(seq, "tracer is disabled\n");
+ return 0;
+ }
+
+ if (tp.skip_ofst * 8 >= TRACE_LEN) {
+ dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n");
+ return -EINVAL;
+ }
+ if (tp.port < 8) {
+ i = adap->chan_map[tp.port & 3];
+ if (i >= MAX_NPORTS) {
+ dev_err(adap->pdev_dev, "tracer %u is assigned "
+ "to a non-existent port\n", trcidx);
+ return -EINVAL;
+ }
+ seq_printf(seq, "tracer is capturing %s %s, ",
+ adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx");
+ } else
+ seq_printf(seq, "tracer is capturing loopback %d, ",
+ tp.port - 8);
+ seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len,
+ tp.min_len);
+ seq_printf(seq, "packets captured %smatch filter\n",
+ tp.invert ? "do not " : "");
+
+ if (tp.skip_ofst) {
+ seq_puts(seq, "filter pattern: ");
+ for (i = 0; i < tp.skip_ofst * 2; i += 2)
+ seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+ seq_putc(seq, '/');
+ for (i = 0; i < tp.skip_ofst * 2; i += 2)
+ seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+ seq_puts(seq, "@0\n");
+ }
+
+ seq_puts(seq, "filter pattern: ");
+ for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+ seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+ seq_putc(seq, '/');
+ for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+ seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+ seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8);
+ return 0;
+}
+
+static int mps_trc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mps_trc_show, inode->i_private);
+}
+
+static unsigned int xdigit2int(unsigned char c)
+{
+ return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+#define TRC_PORT_NONE 0xff
+#define TRC_RSS_ENABLE 0x33
+#define TRC_RSS_DISABLE 0x13
+
+/* Set an MPS trace filter. Syntax is:
+ *
+ * disable
+ *
+ * to disable tracing, or
+ *
+ * interface qid=<qid no> [snaplen=<val>] [minlen=<val>] [not] [<pattern>]...
+ *
+ * where interface is one of rxN, txN, or loopbackN, N = 0..3, qid can be one
+ * of the NIC's response qid obtained from sge_qinfo and pattern has the form
+ *
+ * <pattern data>[/<pattern mask>][@<anchor>]
+ *
+ * Up to 2 filter patterns can be specified. If 2 are supplied the first one
+ * must be anchored at 0. An omitted mask is taken as a mask of 1s, an omitted
+ * anchor is taken as 0.
+ */
+static ssize_t mps_trc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int i, enable, ret;
+ u32 *data, *mask;
+ struct trace_params tp;
+ const struct inode *ino;
+ unsigned int trcidx;
+ char *s, *p, *word, *end;
+ struct adapter *adap;
+ u32 j;
+
+ ino = file_inode(file);
+ trcidx = (uintptr_t)ino->i_private & 3;
+ adap = ino->i_private - trcidx;
+
+ /* Don't accept more than 1K of input; nothing valid can be that long
+ * except lots of whitespace. Really, use less.
+ */
+ if (count > 1024)
+ return -EFBIG;
+ p = s = kzalloc(count + 1, GFP_USER);
+ if (!s)
+ return -ENOMEM;
+ if (copy_from_user(s, buf, count)) {
+ count = -EFAULT;
+ goto out;
+ }
+
+ if (s[count - 1] == '\n')
+ s[count - 1] = '\0';
+
+ enable = strcmp("disable", s) != 0;
+ if (!enable)
+ goto apply;
+
+ /* enable or disable trace multi rss filter */
+ if (adap->trace_rss)
+ t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE);
+ else
+ t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE);
+
+ memset(&tp, 0, sizeof(tp));
+ tp.port = TRC_PORT_NONE;
+ i = 0; /* counts pattern nibbles */
+
+ while (p) {
+ while (isspace(*p))
+ p++;
+ word = strsep(&p, " ");
+ if (!*word)
+ break;
+
+ if (!strncmp(word, "qid=", 4)) {
+ end = (char *)word + 4;
+ ret = kstrtouint(end, 10, &j);
+ if (ret)
+ goto out;
+ if (!adap->trace_rss) {
+ t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j);
+ continue;
+ }
+
+ switch (trcidx) {
+ case 0:
+ t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j);
+ break;
+ case 1:
+ t4_write_reg(adap,
+ MPS_TRC_FILTER1_RSS_CONTROL_A, j);
+ break;
+ case 2:
+ t4_write_reg(adap,
+ MPS_TRC_FILTER2_RSS_CONTROL_A, j);
+ break;
+ case 3:
+ t4_write_reg(adap,
+ MPS_TRC_FILTER3_RSS_CONTROL_A, j);
+ break;
+ }
+ continue;
+ }
+ if (!strncmp(word, "snaplen=", 8)) {
+ end = (char *)word + 8;
+ ret = kstrtouint(end, 10, &j);
+ if (ret || j > 9600) {
+inval: count = -EINVAL;
+ goto out;
+ }
+ tp.snap_len = j;
+ continue;
+ }
+ if (!strncmp(word, "minlen=", 7)) {
+ end = (char *)word + 7;
+ ret = kstrtouint(end, 10, &j);
+ if (ret || j > TFMINPKTSIZE_M)
+ goto inval;
+ tp.min_len = j;
+ continue;
+ }
+ if (!strcmp(word, "not")) {
+ tp.invert = !tp.invert;
+ continue;
+ }
+ if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) {
+ if (word[8] < '0' || word[8] > '3' || word[9])
+ goto inval;
+ tp.port = word[8] - '0' + 8;
+ continue;
+ }
+ if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) {
+ if (word[2] < '0' || word[2] > '3' || word[3])
+ goto inval;
+ tp.port = word[2] - '0' + 4;
+ if (adap->chan_map[tp.port & 3] >= MAX_NPORTS)
+ goto inval;
+ continue;
+ }
+ if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) {
+ if (word[2] < '0' || word[2] > '3' || word[3])
+ goto inval;
+ tp.port = word[2] - '0';
+ if (adap->chan_map[tp.port] >= MAX_NPORTS)
+ goto inval;
+ continue;
+ }
+ if (!isxdigit(*word))
+ goto inval;
+
+ /* we have found a trace pattern */
+ if (i) { /* split pattern */
+ if (tp.skip_len) /* too many splits */
+ goto inval;
+ tp.skip_ofst = i / 16;
+ }
+
+ data = &tp.data[i / 8];
+ mask = &tp.mask[i / 8];
+ j = i;
+
+ while (isxdigit(*word)) {
+ if (i >= TRACE_LEN * 2) {
+ count = -EFBIG;
+ goto out;
+ }
+ *data = (*data << 4) + xdigit2int(*word++);
+ if (++i % 8 == 0)
+ data++;
+ }
+ if (*word == '/') {
+ word++;
+ while (isxdigit(*word)) {
+ if (j >= i) /* mask longer than data */
+ goto inval;
+ *mask = (*mask << 4) + xdigit2int(*word++);
+ if (++j % 8 == 0)
+ mask++;
+ }
+ if (i != j) /* mask shorter than data */
+ goto inval;
+ } else { /* no mask, use all 1s */
+ for ( ; i - j >= 8; j += 8)
+ *mask++ = 0xffffffff;
+ if (i % 8)
+ *mask = (1 << (i % 8) * 4) - 1;
+ }
+ if (*word == '@') {
+ end = (char *)word + 1;
+ ret = kstrtouint(end, 10, &j);
+ if (*end && *end != '\n')
+ goto inval;
+ if (j & 7) /* doesn't start at multiple of 8 */
+ goto inval;
+ j /= 8;
+ if (j < tp.skip_ofst) /* overlaps earlier pattern */
+ goto inval;
+ if (j - tp.skip_ofst > 31) /* skip too big */
+ goto inval;
+ tp.skip_len = j - tp.skip_ofst;
+ }
+ if (i % 8) {
+ *data <<= (8 - i % 8) * 4;
+ *mask <<= (8 - i % 8) * 4;
+ i = (i + 15) & ~15; /* 8-byte align */
+ }
+ }
+
+ if (tp.port == TRC_PORT_NONE)
+ goto inval;
+
+apply:
+ i = t4_set_trace_filter(adap, &tp, trcidx, enable);
+ if (i)
+ count = i;
+out:
+ kfree(s);
+ return count;
+}
+
+static const struct file_operations mps_trc_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = mps_trc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mps_trc_write
+};
+
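The hex-pattern accumulation in mps_trc_write() above packs nibbles MSB-first into 32-bit words and left-aligns any partial final word; a standalone sketch:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int xdigit2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
}

/* Pack a hex string into 32-bit words, 8 nibbles per word, MSB first --
 * the same accumulation mps_trc_write() uses for trace patterns.
 * The output array must be zeroed by the caller.
 */
static int pack_nibbles(const char *s, uint32_t *out)
{
	int i = 0;

	while (isxdigit((unsigned char)*s)) {
		out[i / 8] = (out[i / 8] << 4) + xdigit2int((unsigned char)*s++);
		i++;
	}
	if (i % 8)	/* left-align a partial final word, as the driver does */
		out[i / 8] <<= (8 - i % 8) * 4;
	return i;	/* number of nibbles consumed */
}

int main(void)
{
	uint32_t w[2] = { 0, 0 };
	int n = pack_nibbles("0800", w);

	printf("%d nibbles -> 0x%08x\n", n, (unsigned int)w[0]); /* 0x08000000 */
	return 0;
}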
static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -1895,13 +2245,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+ int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
- int toe_idx = r - eth_entries;
- int rdma_idx = toe_idx - toe_entries;
+ int iscsi_idx = r - eth_entries;
+ int rdma_idx = iscsi_idx - iscsi_entries;
int ciq_idx = rdma_idx - rdma_entries;
int ctrl_idx = ciq_idx - ciq_entries;
int fq_idx = ctrl_idx - ctrl_entries;
@@ -1917,8 +2267,12 @@ do { \
seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
#define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
#define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
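+/* S3 prints one row: a label followed by the given field for each queue in
+ * the current group of up to four; the T*()/R*() helpers index the Tx/Rx
+ * queue arrays, with the "L" variants for unsigned long fields.
+ */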
if (r < eth_entries) {
int base_qset = r * 4;
@@ -1957,12 +2311,30 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
- } else if (toe_idx < toe_entries) {
- const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
- const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
- int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxCSO:", stats.rx_cso);
+ RL("VLANxtract:", stats.vlan_ex);
+ RL("LROmerged:", stats.lro_merged);
+ RL("LROpackets:", stats.lro_pkts);
+ RL("RxDrops:", stats.rx_drops);
+ TL("TSO:", tso);
+ TL("TxCSO:", tx_cso);
+ TL("VLANins:", vlan_ins);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
+ TL("TxMapErr:", mapping_err);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
+ } else if (iscsi_idx < iscsi_entries) {
+ const struct sge_ofld_rxq *rx =
+ &adap->sge.ofldrxq[iscsi_idx * 4];
+ const struct sge_ofld_txq *tx =
+ &adap->sge.ofldtxq[iscsi_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
- S("QType:", "TOE");
+ S("QType:", "iSCSI");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
@@ -1982,6 +2354,13 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
} else if (rdma_idx < rdma_entries) {
const struct sge_ofld_rxq *rx =
&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2004,6 +2383,13 @@ do { \
R("FL avail:", fl.avail);
R("FL PIDX:", fl.pidx);
R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLStarving:", fl.starving);
+
} else if (ciq_idx < ciq_entries) {
const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2019,6 +2405,9 @@ do { \
S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
S3("u", "Intr pktcnt:",
adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ RL("RxAN:", stats.an);
+ RL("RxNoMem:", stats.nomem);
+
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2029,6 +2418,8 @@ do { \
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
+ TL("TxQFull:", q.stops);
+ TL("TxQRestarts:", q.restarts);
} else if (fq_idx == 0) {
const struct sge_rspq *evtq = &adap->sge.fw_evtq;
@@ -2044,10 +2435,14 @@ do { \
adap->sge.counter_val[evtq->pktcnt_idx]);
}
#undef R
+#undef RL
#undef T
+#undef TL
#undef S
+#undef R3
+#undef T3
#undef S3
-return 0;
+ return 0;
}
static int sge_queue_entries(const struct adapter *adap)
@@ -2164,6 +2559,73 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adap = seq->private;
+ const struct tid_info *t = &adap->tids;
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+ unsigned int sb;
+
+ if (chip <= CHELSIO_T5)
+ sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+ else
+ sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+ if (sb) {
+ seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u/%u\n",
+ atomic_read(&t->tids_in_use),
+ atomic_read(&t->hash_tids_in_use));
+ } else if (adap->flags & FW_OFLD_CONN) {
+ seq_printf(seq, "TID range: %u..%u/%u..%u",
+ t->aftid_base,
+ t->aftid_end,
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u/%u\n",
+ atomic_read(&t->tids_in_use),
+ atomic_read(&t->hash_tids_in_use));
+ } else {
+ seq_printf(seq, "TID range: %u..%u",
+ adap->tids.hash_base,
+ t->ntids - 1);
+ seq_printf(seq, ", in use: %u\n",
+ atomic_read(&t->hash_tids_in_use));
+ }
+ } else if (t->ntids) {
+ seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+ seq_printf(seq, ", in use: %u\n",
+ atomic_read(&t->tids_in_use));
+ }
+
+ if (t->nstids)
+ seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+ (!t->stid_base &&
+ (chip <= CHELSIO_T5)) ?
+ t->stid_base + 1 : t->stid_base,
+ t->stid_base + t->nstids - 1, t->stids_in_use);
+ if (t->natids)
+ seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+ t->natids - 1, t->atids_in_use);
+ seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+ t->ftid_base + t->nftids - 1);
+ if (t->nsftids)
+ seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+ t->sftid_base, t->sftid_base + t->nsftids - 2,
+ t->sftids_in_use);
+ if (t->ntids)
+ seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+ t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+ t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
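+
+/* Illustrative "tids" output (values invented for this sketch):
+ *
+ * TID range: 0..511/4096..16383, in use: 3/12
+ * STID range: 1..127, in use: 2
+ * FTID range: 128..623
+ * HW TID usage: 5 IP users, 0 IPv6 users
+ */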
+
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
@@ -2227,6 +2689,290 @@ static const struct file_operations blocked_fl_fops = {
.llseek = generic_file_llseek,
};
+struct mem_desc {
+ unsigned int base;
+ unsigned int limit;
+ unsigned int idx;
+};
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct mem_desc *)a)->base -
+ ((const struct mem_desc *)b)->base;
+}
+
+static void mem_region_show(struct seq_file *seq, const char *name,
+ unsigned int from, unsigned int to)
+{
+ char buf[40];
+
+ string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+ sizeof(buf));
+ seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
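+
+/* Each region becomes one meminfo line, e.g. (illustrative):
+ *
+ * Tx payload:     0x44580-0x64579 [128 KiB]
+ */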
+
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+ static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+ "MC0:", "MC1:"};
+ static const char * const region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+ };
+
+ int i, n;
+ u32 lo, hi, used, alloc;
+ struct mem_desc avail[4];
+ struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
+ struct mem_desc *md = mem;
+ struct adapter *adap = seq->private;
+
+ for (i = 0; i < ARRAY_SIZE(mem); i++) {
+ mem[i].limit = 0;
+ mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ avail[i].base = EDRAM0_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+ avail[i].idx = 0;
+ i++;
+ }
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ avail[i].base = EDRAM1_BASE_G(hi) << 20;
+ avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+ avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(adap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+ avail[i].idx = 3;
+ i++;
+ }
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+ avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+ avail[i].limit =
+ avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+ avail[i].idx = 2;
+ i++;
+ }
+ }
+ if (!i) /* no memory available */
+ return 0;
+ sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+ (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(adap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ }
+ md++;
+
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(region);
+ if (!is_t4(adap->params.chip)) {
+ u32 size = 0;
+ u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+ u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+ if (is_t5(adap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(adap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = adap->vres.ocq.start;
+ if (adap->vres.ocq.size)
+ md->limit = md->base + adap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(region); /* hide it */
+ md++;
+
+ /* add any address-space holes, there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (avail[n].limit < avail[n + 1].base)
+ (md++)->base = avail[n].limit;
+ if (avail[n].limit)
+ (md++)->base = avail[n].limit;
+
+ n = md - mem;
+ sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+ for (lo = 0; lo < i; lo++)
+ mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+ avail[lo].limit - 1);
+
+ seq_putc(seq, '\n');
+ for (i = 0; i < n; i++) {
+ if (mem[i].idx >= ARRAY_SIZE(region))
+ continue; /* skip holes */
+ if (!mem[i].limit)
+ mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, region[mem[i].idx], mem[i].base,
+ mem[i].limit);
+ }
+
+ seq_putc(seq, '\n');
+ lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP RAM:", lo, hi);
+
+ lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+ lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+ seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+ PMRXMAXPAGE_G(lo),
+ t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+ (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+ lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+ seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+ PMTXMAXPAGE_G(lo),
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+ hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+ seq_printf(seq, "%u p-structs\n\n",
+ t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ for (i = 0; i < adap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(adap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(adap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ /* For T6 these are MAC buffer groups */
+ seq_printf(seq,
+ "Loopback %d using %u pages out of %u allocated\n",
+ i, used, alloc);
+ }
+ return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+ .owner = THIS_MODULE,
+ .open = meminfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -2264,6 +3010,10 @@ int t4_setup_debugfs(struct adapter *adap)
{ "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
{ "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
{ "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
+ { "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+ { "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+ { "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+ { "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
{ "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
{ "rss", &rss_debugfs_fops, S_IRUSR, 0 },
@@ -2293,7 +3043,9 @@ int t4_setup_debugfs(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
+ { "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
+ { "meminfo", &meminfo_fops, S_IRUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
@@ -2332,14 +3084,19 @@ int t4_setup_debugfs(struct adapter *adap)
EXT_MEM1_SIZE_G(size));
}
} else {
- if (i & EXT_MEM_ENABLE_F)
+ if (i & EXT_MEM_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
+ }
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
&flash_debugfs_fops, adap->params.sf_size);
+ debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+ adap->debugfs_root, &adap->use_bd);
+ debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+ adap->debugfs_root, &adap->trace_rss);
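+
+ /* When trace_rss is set, a queue value written to trace<N> programs that
+ * filter's MPS_TRC_FILTER<N>_RSS_CONTROL register; otherwise it goes to
+ * the global MPS_T5_TRC_RSS_CONTROL register (see mps_trc_write above).
+ */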
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 687acf71fa15..a077f9476daf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val)
}
static const char stats_strings[][ETH_GSTRING_LEN] = {
- "TxOctetsOK ",
- "TxFramesOK ",
- "TxBroadcastFrames ",
- "TxMulticastFrames ",
- "TxUnicastFrames ",
- "TxErrorFrames ",
-
- "TxFrames64 ",
- "TxFrames65To127 ",
- "TxFrames128To255 ",
- "TxFrames256To511 ",
- "TxFrames512To1023 ",
- "TxFrames1024To1518 ",
- "TxFrames1519ToMax ",
-
- "TxFramesDropped ",
- "TxPauseFrames ",
- "TxPPP0Frames ",
- "TxPPP1Frames ",
- "TxPPP2Frames ",
- "TxPPP3Frames ",
- "TxPPP4Frames ",
- "TxPPP5Frames ",
- "TxPPP6Frames ",
- "TxPPP7Frames ",
-
- "RxOctetsOK ",
- "RxFramesOK ",
- "RxBroadcastFrames ",
- "RxMulticastFrames ",
- "RxUnicastFrames ",
-
- "RxFramesTooLong ",
- "RxJabberErrors ",
- "RxFCSErrors ",
- "RxLengthErrors ",
- "RxSymbolErrors ",
- "RxRuntFrames ",
-
- "RxFrames64 ",
- "RxFrames65To127 ",
- "RxFrames128To255 ",
- "RxFrames256To511 ",
- "RxFrames512To1023 ",
- "RxFrames1024To1518 ",
- "RxFrames1519ToMax ",
-
- "RxPauseFrames ",
- "RxPPP0Frames ",
- "RxPPP1Frames ",
- "RxPPP2Frames ",
- "RxPPP3Frames ",
- "RxPPP4Frames ",
- "RxPPP5Frames ",
- "RxPPP6Frames ",
- "RxPPP7Frames ",
-
- "RxBG0FramesDropped ",
- "RxBG1FramesDropped ",
- "RxBG2FramesDropped ",
- "RxBG3FramesDropped ",
- "RxBG0FramesTrunc ",
- "RxBG1FramesTrunc ",
- "RxBG2FramesTrunc ",
- "RxBG3FramesTrunc ",
-
- "TSO ",
- "TxCsumOffload ",
- "RxCsumGood ",
- "VLANextractions ",
- "VLANinsertions ",
- "GROpackets ",
- "GROmerged ",
+ "tx_octets_ok ",
+ "tx_frames_ok ",
+ "tx_broadcast_frames ",
+ "tx_multicast_frames ",
+ "tx_unicast_frames ",
+ "tx_error_frames ",
+
+ "tx_frames_64 ",
+ "tx_frames_65_to_127 ",
+ "tx_frames_128_to_255 ",
+ "tx_frames_256_to_511 ",
+ "tx_frames_512_to_1023 ",
+ "tx_frames_1024_to_1518 ",
+ "tx_frames_1519_to_max ",
+
+ "tx_frames_dropped ",
+ "tx_pause_frames ",
+ "tx_ppp0_frames ",
+ "tx_ppp1_frames ",
+ "tx_ppp2_frames ",
+ "tx_ppp3_frames ",
+ "tx_ppp4_frames ",
+ "tx_ppp5_frames ",
+ "tx_ppp6_frames ",
+ "tx_ppp7_frames ",
+
+ "rx_octets_ok ",
+ "rx_frames_ok ",
+ "rx_broadcast_frames ",
+ "rx_multicast_frames ",
+ "rx_unicast_frames ",
+
+ "rx_frames_too_long ",
+ "rx_jabber_errors ",
+ "rx_fcs_errors ",
+ "rx_length_errors ",
+ "rx_symbol_errors ",
+ "rx_runt_frames ",
+
+ "rx_frames_64 ",
+ "rx_frames_65_to_127 ",
+ "rx_frames_128_to_255 ",
+ "rx_frames_256_to_511 ",
+ "rx_frames_512_to_1023 ",
+ "rx_frames_1024_to_1518 ",
+ "rx_frames_1519_to_max ",
+
+ "rx_pause_frames ",
+ "rx_ppp0_frames ",
+ "rx_ppp1_frames ",
+ "rx_ppp2_frames ",
+ "rx_ppp3_frames ",
+ "rx_ppp4_frames ",
+ "rx_ppp5_frames ",
+ "rx_ppp6_frames ",
+ "rx_ppp7_frames ",
+
+ "rx_bg0_frames_dropped ",
+ "rx_bg1_frames_dropped ",
+ "rx_bg2_frames_dropped ",
+ "rx_bg3_frames_dropped ",
+ "rx_bg0_frames_trunc ",
+ "rx_bg1_frames_trunc ",
+ "rx_bg2_frames_trunc ",
+ "rx_bg3_frames_trunc ",
+
+ "tso ",
+ "tx_csum_offload ",
+ "rx_csum_good ",
+ "vlan_extractions ",
+ "vlan_insertions ",
+ "gro_packets ",
+ "gro_merged ",
};
static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
@@ -211,8 +211,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
+ info->regdump_len = get_regs_len(dev);
- if (adapter->params.fw_vers)
+ if (!adapter->params.fw_vers)
+ strcpy(info->fw_version, "N/A");
+ else
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -612,6 +615,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_cfg;
u32 speed = ethtool_cmd_speed(cmd);
+ struct link_config old_lc;
+ int ret;
if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
return -EINVAL;
@@ -626,13 +631,11 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
}
+ old_lc = *lc;
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) ||
- (speed == 1000) ||
- (speed == 10000) ||
- (speed == 40000))
+ if (!(lc->supported & cap))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -645,10 +648,14 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
lc->autoneg = cmd->autoneg;
- if (netif_running(dev))
- return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
- lc);
- return 0;
+ /* If the firmware rejects the Link Configuration request, back out
+ * the changes and report the error.
+ */
+ ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+ if (ret)
+ *lc = old_lc;
+
+ return ret;
}
static void get_pauseparam(struct net_device *dev,
@@ -847,7 +854,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
{
int i, err = 0;
struct adapter *adapter = netdev2adap(dev);
- u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+ u8 *buf = t4_alloc_mem(EEPROMSIZE);
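+ /* t4_alloc_mem() falls back to vmalloc when kmalloc cannot satisfy the
+ * request, which suits a buffer of EEPROMSIZE bytes better than plain
+ * kmalloc.
+ */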
if (!buf)
return -ENOMEM;
@@ -858,7 +865,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
if (!err)
memcpy(data, buf + e->offset, e->len);
- kfree(buf);
+ t4_free_mem(buf);
return err;
}
@@ -887,7 +894,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/* RMW possibly needed for first or last words.
*/
- buf = kmalloc(aligned_len, GFP_KERNEL);
+ buf = t4_alloc_mem(aligned_len);
if (!buf)
return -ENOMEM;
err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
@@ -915,7 +922,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
err = t4_seeprom_wp(adapter, true);
out:
if (buf != data)
- kfree(buf);
+ t4_free_mem(buf);
return err;
}
@@ -925,6 +932,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
const struct firmware *fw;
struct adapter *adap = netdev2adap(netdev);
unsigned int mbox = PCIE_FW_MASTER_M + 1;
+ u32 pcie_fw;
+ unsigned int master;
+ u8 master_vld = 0;
+
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ master = PCIE_FW_MASTER_G(pcie_fw);
+ if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+ master_vld = 1;
+ /* If csiostor is the master, return */
+ if (master_vld && (master != adap->pf)) {
+ dev_warn(adap->pdev_dev,
+ "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+ return -EOPNOTSUPP;
+ }
ef->data[sizeof(ef->data) - 1] = '\0';
ret = request_firmware(&fw, ef->data, adap->pdev_dev);
@@ -947,6 +968,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
return ret;
}
+static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+{
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ ts_info->phc_index = -1;
+
+ return 0;
+}
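+
+/* No PTP hardware clock is exposed (phc_index = -1): RX timestamps are
+ * taken from SGE ingress queue entries, and TX timestamping is
+ * software-only.
+ */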
+
static u32 get_rss_table_size(struct net_device *dev)
{
const struct port_info *pi = netdev_priv(dev);
@@ -983,11 +1018,15 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
if (!p)
return 0;
- for (i = 0; i < pi->rss_size; i++)
- pi->rss[i] = p[i];
- if (pi->adapter->flags & FULL_INIT_DONE)
+ /* Interface must be brought up at least once */
+ if (pi->adapter->flags & FULL_INIT_DONE) {
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
+
return cxgb4_write_rss(pi, pi->rss);
- return 0;
+ }
+
+ return -EPERM;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
@@ -1081,6 +1120,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
.flash_device = set_flash,
+ .get_ts_info = get_ts_info
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 351f3b1bf800..2cf81857a297 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -83,7 +83,7 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
-#define DRV_DESC "Chelsio T4/T5 Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware of the
@@ -151,6 +151,7 @@ MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
+MODULE_FIRMWARE(FW6_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
@@ -275,7 +276,7 @@ static void link_report(struct net_device *dev)
else {
static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
- const char *s = "10Mbps";
+ const char *s;
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
@@ -291,6 +292,10 @@ static void link_report(struct net_device *dev)
case 40000:
s = "40Gbps";
break;
+ default:
+ pr_info("%s: unsupported speed: %d\n",
+ dev->name, p->link_cfg.speed);
+ return;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -1548,7 +1553,7 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
t->stid_tab[stid].data = data;
stid -= t->nstids;
stid += t->sftid_base;
- t->stids_in_use++;
+ t->sftids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -1573,10 +1578,14 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
- if (family == PF_INET)
- t->stids_in_use--;
- else
- t->stids_in_use -= 4;
+ if (stid < t->nstids) {
+ if (family == PF_INET)
+ t->stids_in_use--;
+ else
+ t->stids_in_use -= 4;
+ } else {
+ t->sftids_in_use--;
+ }
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1654,20 +1663,25 @@ static void process_tid_release_list(struct work_struct *work)
*/
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
- void *old;
struct sk_buff *skb;
struct adapter *adap = container_of(t, struct adapter, tids);
- old = t->tid_tab[tid];
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ if (t->hash_base && (tid >= t->hash_base))
+ atomic_dec(&t->hash_tids_in_use);
+ else
+ atomic_dec(&t->tids_in_use);
+ }
+
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
if (likely(skb)) {
- t->tid_tab[tid] = NULL;
mk_tid_release(skb, chan, tid);
t4_ofld_send(adap, skb);
} else
cxgb4_queue_tid_release(t, chan, tid);
- if (old)
- atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
@@ -1702,9 +1716,11 @@ static int tid_init(struct tid_info *t)
spin_lock_init(&t->atid_lock);
t->stids_in_use = 0;
+ t->sftids_in_use = 0;
t->afree = NULL;
t->atids_in_use = 0;
atomic_set(&t->tids_in_use, 0);
+ atomic_set(&t->hash_tids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */
if (natids) {
@@ -2948,6 +2964,30 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
data->reg_num, data->val_in);
break;
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, &pi->tstamp_config,
+ sizeof(pi->tstamp_config)) ?
+ -EFAULT : 0;
+ case SIOCSHWTSTAMP:
+ if (copy_from_user(&pi->tstamp_config, req->ifr_data,
+ sizeof(pi->tstamp_config)))
+ return -EFAULT;
+
+ switch (pi->tstamp_config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pi->rxtstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ pi->rxtstamp = true;
+ break;
+ default:
+ pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ return copy_to_user(req->ifr_data, &pi->tstamp_config,
+ sizeof(pi->tstamp_config)) ?
+ -EFAULT : 0;
default:
return -EOPNOTSUPP;
}
@@ -3657,6 +3697,10 @@ static int adap_init0(struct adapter *adap)
*/
t4_get_fw_version(adap, &adap->params.fw_vers);
t4_get_tp_version(adap, &adap->params.tp_vers);
+ ret = t4_check_fw_version(adap);
+ /* If the firmware is too old (not supported by the driver), force an update. */
+ if (ret)
+ state = DEV_STATE_UNINIT;
if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
struct fw_info *fw_info;
struct fw_hdr *card_fw;
@@ -4442,6 +4486,10 @@ static int enable_msix(struct adapter *adap)
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
+ dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
+ "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
+ allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+ s->rdmaciqs);
kfree(entries);
return 0;
@@ -4551,6 +4599,27 @@ static void free_some_resources(struct adapter *adapter)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
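+
+/* The top nibble of the PCI device ID encodes the chip generation (T4, T5
+ * or T6); get_chip_type() combines it with the PL revision to form the
+ * full chip code.
+ */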
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+ u16 device_id;
+
+ /* Retrieve adapter's device ID */
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+
+ switch (device_id >> 12) {
+ case CHELSIO_T4:
+ return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+ case CHELSIO_T5:
+ return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ case CHELSIO_T6:
+ return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ default:
+ dev_err(&pdev->dev, "Device %d is not supported\n",
+ device_id);
+ }
+ return -EINVAL;
+}
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err, s_qpp, qpp, num_seg;
@@ -4558,6 +4627,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bool highdma = false;
struct adapter *adapter = NULL;
void __iomem *regs;
+ u32 whoami, pl_rev;
+ enum chip_type chip;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4586,7 +4657,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_unmap_bar0;
/* We control everything through one PF */
- func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
+ whoami = readl(regs + PL_WHOAMI_A);
+ pl_rev = REV_G(readl(regs + PL_REV_A));
+ chip = get_chip_type(pdev, pl_rev);
+ func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (func != ent->driver_data) {
iounmap(regs);
pci_disable_device(pdev);
@@ -4677,8 +4752,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
- t4_write_reg(adapter, SGE_STAT_CFG_A,
- STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
@@ -4690,6 +4763,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_unmap_bar;
+ /* configure SGE_STAT_CFG_A to read WC stats */
+ if (!is_t4(adapter->params.chip))
+ t4_write_reg(adapter, SGE_STAT_CFG_A,
+ STATSOURCE_T5_V(7) | STATMODE_V(0));
+
for_each_port(adapter, i) {
struct net_device *netdev;
@@ -4757,7 +4835,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
cfg_queues(adapter);
- adapter->l2t = t4_init_l2t();
+ adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
if (!adapter->l2t) {
/* We tolerate a lack of L2T, giving up some functionality */
dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
@@ -4782,6 +4860,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->params.offload = 0;
}
+ if (is_offload(adapter)) {
+ if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+ u32 hash_base, hash_reg;
+
+ if (chip <= CHELSIO_T5) {
+ hash_reg = LE_DB_TID_HASHBASE_A;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ } else {
+ hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base;
+ }
+ }
+ }
+
/* See what interrupts we'll be using */
if (msi > 1 && enable_msix(adapter) == 0)
adapter->flags |= USING_MSIX;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b27897d4f787..c3a8be5541e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -96,6 +96,7 @@ struct tid_info {
unsigned long *stid_bmap;
unsigned int nstids;
unsigned int stid_base;
+ unsigned int hash_base;
union aopen_entry *atid_tab;
unsigned int natids;
@@ -116,8 +117,12 @@ struct tid_info {
spinlock_t stid_lock;
unsigned int stids_in_use;
+ unsigned int sftids_in_use;
+ /* TIDs in the TCAM */
atomic_t tids_in_use;
+ /* TIDs in the HASH */
+ atomic_t hash_tids_in_use;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
unsigned int tid)
{
t->tid_tab[tid] = data;
- atomic_inc(&t->tids_in_use);
+ if (t->hash_base && (tid >= t->hash_base))
+ atomic_inc(&t->hash_tids_in_use);
+ else
+ atomic_inc(&t->tids_in_use);
}
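+
+/* TIDs at or above hash_base live in the LE hash table rather than in the
+ * TCAM, so the two populations are accounted separately.
+ */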
int cxgb4_alloc_atid(struct tid_info *t, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 252efc29321f..ac27898c6ab0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -51,24 +51,17 @@
#define VLAN_NONE 0xfff
/* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR (1 << 12)
-
-enum {
- L2T_STATE_VALID, /* entry is up to date */
- L2T_STATE_STALE, /* entry may be used but needs revalidation */
- L2T_STATE_RESOLVING, /* entry needs address resolution */
- L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
- /* when state is one of the below the entry is not hashed */
- L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
- L2T_STATE_UNUSED /* entry not in use */
-};
+#define SYNC_WR_S 12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F SYNC_WR_V(1)
struct l2t_data {
+ unsigned int l2t_start; /* start index of our piece of the L2T */
+ unsigned int l2t_size; /* number of entries in l2tab */
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[L2T_SIZE];
+ struct l2t_entry l2tab[0]; /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
/*
* To avoid having to check address families we do not allow v4 and v6
* neighbors to be on the same hash chain. We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second. We need at least two
+ * entries in our L2T for this scheme to work.
*/
enum {
- L2T_SZ_HALF = L2T_SIZE / 2,
- L2T_HASH_MASK = L2T_SZ_HALF - 1
+ L2T_MIN_HASH_BUCKETS = 2,
};
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+ int ifindex)
{
- return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+ unsigned int l2t_size_half = d->l2t_size / 2;
+
+ return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+ int ifindex)
{
+ unsigned int l2t_size_half = d->l2t_size / 2;
u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
- return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+ return (l2t_size_half +
+ (jhash_2words(xor, ifindex, 0) % l2t_size_half));
}
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+ int addr_len, int ifindex)
{
- return addr_len == 4 ? arp_hash(addr, ifindex) :
- ipv6_hash(addr, ifindex);
+ return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+ ipv6_hash(d, addr, ifindex);
}
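+
+/* The hashes above reduce the jhash output modulo half the table size
+ * instead of masking it, since a per-function slice of the L2T no longer
+ * has to be a power of two entries long.
+ */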
/*
@@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
*/
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
+ struct l2t_data *d = adap->l2t;
+ unsigned int l2t_idx = e->idx + d->l2t_start;
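+ /* The hardware expects an absolute L2T index; e->idx is relative to
+ * the start of this function's slice of the table.
+ */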
struct sk_buff *skb;
struct cpl_l2t_write_req *req;
@@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
- e->idx | (sync ? F_SYNC_WR : 0) |
+ l2t_idx | (sync ? SYNC_WR_F : 0) |
TID_QID_V(adap->sge.fw_evtq.abs_id)));
req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
- req->l2t_idx = htons(e->idx);
+ req->l2t_idx = htons(l2t_idx);
req->vlan = htons(e->vlan);
if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e)
*/
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
+ struct l2t_data *d = adap->l2t;
unsigned int tid = GET_TID(rpl);
- unsigned int idx = tid & (L2T_SIZE - 1);
+ unsigned int l2t_idx = tid % L2T_SIZE;
if (unlikely(rpl->status != CPL_ERR_NONE)) {
dev_err(adap->pdev_dev,
"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
- rpl->status, idx);
+ rpl->status, l2t_idx);
return;
}
- if (tid & F_SYNC_WR) {
- struct l2t_entry *e = &adap->l2t->l2tab[idx];
+ if (tid & SYNC_WR_F) {
+ struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
spin_lock(&e->lock);
if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d)
return NULL;
/* there's definitely a free entry */
- for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+ for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
if (atomic_read(&e->refcnt) == 0)
goto found;
@@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *)neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(addr, addr_len, ifidx);
+ int hash = addr_hash(d, addr, addr_len, ifidx);
if (neigh->dev->flags & IFF_LOOPBACK)
lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(addr, addr_len, ifidx);
+ int hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
return write_l2e(adap, e, 0);
}
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
+ unsigned int l2t_size;
int i;
struct l2t_data *d;
- d = t4_alloc_mem(sizeof(*d));
+ if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+ return NULL;
+ l2t_size = l2t_end - l2t_start + 1;
+ if (l2t_size < L2T_MIN_HASH_BUCKETS)
+ return NULL;
+
+ d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
if (!d)
return NULL;
+ d->l2t_start = l2t_start;
+ d->l2t_size = l2t_size;
+
d->rover = d->l2tab;
- atomic_set(&d->nfree, L2T_SIZE);
+ atomic_set(&d->nfree, l2t_size);
rwlock_init(&d->lock);
- for (i = 0; i < L2T_SIZE; ++i) {
+ for (i = 0; i < d->l2t_size; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
@@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void)
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
- struct l2t_entry *l2tab = seq->private;
+ struct l2t_data *d = seq->private;
- return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+ return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}
static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
"Ethernet address VLAN/P LP State Users Port\n");
else {
char ip[60];
+ struct l2t_data *d = seq->private;
struct l2t_entry *e = v;
spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
else
sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
- e->idx, ip, e->dmac,
+ e->idx + d->l2t_start, ip, e->dmac,
e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
l2e_state(e), atomic_read(&e->refcnt),
e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file)
struct adapter *adap = inode->i_private;
struct seq_file *seq = file->private_data;
- seq->private = adap->l2t->l2tab;
+ seq->private = adap->l2t;
}
return rc;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index a30126ce90cb..b38dc526aad5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,20 @@
#include <linux/if_ether.h>
#include <linux/atomic.h>
+enum { L2T_SIZE = 4096 }; /* # of L2T entries */
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+ L2T_STATE_NOARP, /* netdev down or removed */
+
+ /* when state is one of the below the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
struct adapter;
struct l2t_data;
struct neighbour;
@@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl;
*/
struct l2t_entry {
u16 state; /* entry state */
- u16 idx; /* entry index */
+ u16 idx; /* entry index within in-memory table */
u32 addr[4]; /* next hop IP or IPv6 address */
int ifindex; /* neighbor's net_device's ifindex */
struct neighbour *neigh; /* associated neighbour */
@@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
extern const struct file_operations t4_l2t_fops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 942db078f33a..b7b93e7a643d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -807,7 +807,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* message or, if we're doing a Large Send Offload, an LSO CPL message
* with an embedded TX Packet Write CPL message.
*/
- flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size)
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_lso_core) +
@@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid;
+ u32 wr_mid, ctrl0;
u64 cntrl, *end;
int qidx, credits;
unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@ out_free: dev_kfree_skb_any(skb);
#endif /* CONFIG_CHELSIO_T4_FCOE */
}
- cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
- TXPKT_INTF_V(pi->tx_chan) |
- TXPKT_PF_V(adap->pf));
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (is_t4(adap->params.chip))
+ ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+ else
+ ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+ cpl->ctrl0 = htonl(ctrl0);
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1418,18 +1424,17 @@ static void restart_ctrlq(unsigned long data)
struct fw_wr_hdr *wr;
unsigned int ndesc = skb->priority; /* previously saved */
- /*
- * Write descriptors and free skbs outside the lock to limit
+ written += ndesc;
+ /* Write descriptors and free skbs outside the lock to limit
* wait times. q->full is still set so new skbs will be queued.
*/
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
- written += ndesc;
- txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
unsigned long old = q->q.stops;
@@ -1815,11 +1820,34 @@ static noinline int handle_trace_pkt(struct adapter *adap,
return 0;
}
+/**
+ * cxgb4_sgetim_to_hwtstamp - convert SGE timestamp to HW timestamp
+ * @adap: the adapter
+ * @hwtstamps: time stamp structure to update
+ * @sgetstamp: 60-bit IQE timestamp
+ *
+ * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
+ * convert it to a ktime_t and store it in @hwtstamps.
+ */
+static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 sgetstamp)
+{
+ u64 ns;
+ u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
+
+ ns = div_u64(tmp, adap->params.vpd.cclk);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
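+
+/* With the core clock in kHz (as the arithmetic above assumes),
+ * ticks * 10^6 / cclk yields nanoseconds; adding cclk/2 before the
+ * division rounds to the nearest nanosecond.
+ */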
+
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
const struct cpl_rx_pkt *pkt)
{
struct adapter *adapter = rxq->rspq.adap;
struct sge *s = &adapter->sge;
+ struct port_info *pi;
int ret;
struct sk_buff *skb;
@@ -1837,6 +1865,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
skb_mark_napi_id(skb, &rxq->rspq.napi);
+ pi = netdev_priv(skb->dev);
+ if (pi->rxtstamp)
+ cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
+ gl->sgetstamp);
if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
PKT_HASH_TYPE_L3);
@@ -1872,9 +1904,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
struct sge *s = &q->adap->sge;
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
-#ifdef CONFIG_CHELSIO_T4_FCOE
struct port_info *pi;
-#endif
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
@@ -1905,6 +1935,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
+ pi = netdev_priv(skb->dev);
+ if (pi->rxtstamp)
+ cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
+ si->sgetstamp);
if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1921,7 +1955,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
- pi = netdev_priv(skb->dev);
if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
(pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
@@ -2062,6 +2095,8 @@ static int process_responses(struct sge_rspq *q, int budget)
unmap_rx_buf(q->adap, &rxq->fl);
}
+ si.sgetstamp = SGE_TIMESTAMP_G(
+ be64_to_cpu(rc->last_flit));
/*
* Last buffer remains mapped so explicitly make it
* coherent for CPU access.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b52aae7ec86..cf61a5869c6e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -37,6 +37,7 @@
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"
+#include "t4fw_version.h"
/**
* t4_wait_op_done_val - wait until an operation is completed
@@ -345,6 +346,43 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
FW_CMD_MAX_TIMEOUT);
}
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+ u32 edc_ecc_err_addr_reg;
+ u32 rdata_reg;
+
+ if (is_t4(adap->params.chip)) {
+ CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+ return 0;
+ }
+ if (idx != 0 && idx != 1) {
+ CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+ return 0;
+ }
+
+ edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+ rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+ CH_WARN(adap,
+ "edc%d err addr 0x%x: 0x%x.\n",
+ idx, edc_ecc_err_addr_reg,
+ t4_read_reg(adap, edc_ecc_err_addr_reg));
+ CH_WARN(adap,
+ "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+ rdata_reg,
+ (unsigned long long)t4_read_reg64(adap, rdata_reg),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+ (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+ return 0;
+}
+
/**
* t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
* @adap: the adapter
@@ -661,50 +699,107 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
{
static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
- 0x1180, 0x11b4,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
0x11fc, 0x123c,
0x1300, 0x173c,
0x1800, 0x18fc,
- 0x3000, 0x305c,
- 0x3068, 0x30d8,
- 0x30e0, 0x5924,
- 0x5960, 0x59d4,
- 0x5a00, 0x5af8,
+ 0x3000, 0x30d8,
+ 0x30e0, 0x30e4,
+ 0x30ec, 0x5910,
+ 0x5920, 0x5924,
+ 0x5960, 0x5960,
+ 0x5968, 0x5968,
+ 0x5970, 0x5970,
+ 0x5978, 0x5978,
+ 0x5980, 0x5980,
+ 0x5988, 0x5988,
+ 0x5990, 0x5990,
+ 0x5998, 0x5998,
+ 0x59a0, 0x59d4,
+ 0x5a00, 0x5ae0,
+ 0x5ae8, 0x5ae8,
+ 0x5af0, 0x5af0,
+ 0x5af8, 0x5af8,
0x6000, 0x6098,
0x6100, 0x6150,
0x6200, 0x6208,
0x6240, 0x6248,
- 0x6280, 0x6338,
+ 0x6280, 0x62b0,
+ 0x62c0, 0x6338,
0x6370, 0x638c,
0x6400, 0x643c,
0x6500, 0x6524,
- 0x6a00, 0x6a38,
- 0x6a60, 0x6a78,
- 0x6b00, 0x6b84,
- 0x6bf0, 0x6c84,
- 0x6cf0, 0x6d84,
- 0x6df0, 0x6e84,
- 0x6ef0, 0x6f84,
- 0x6ff0, 0x7084,
- 0x70f0, 0x7184,
- 0x71f0, 0x7284,
- 0x72f0, 0x7384,
- 0x73f0, 0x7450,
+ 0x6a00, 0x6a04,
+ 0x6a14, 0x6a38,
+ 0x6a60, 0x6a70,
+ 0x6a78, 0x6a78,
+ 0x6b00, 0x6b0c,
+ 0x6b1c, 0x6b84,
+ 0x6bf0, 0x6bf8,
+ 0x6c00, 0x6c0c,
+ 0x6c1c, 0x6c84,
+ 0x6cf0, 0x6cf8,
+ 0x6d00, 0x6d0c,
+ 0x6d1c, 0x6d84,
+ 0x6df0, 0x6df8,
+ 0x6e00, 0x6e0c,
+ 0x6e1c, 0x6e84,
+ 0x6ef0, 0x6ef8,
+ 0x6f00, 0x6f0c,
+ 0x6f1c, 0x6f84,
+ 0x6ff0, 0x6ff8,
+ 0x7000, 0x700c,
+ 0x701c, 0x7084,
+ 0x70f0, 0x70f8,
+ 0x7100, 0x710c,
+ 0x711c, 0x7184,
+ 0x71f0, 0x71f8,
+ 0x7200, 0x720c,
+ 0x721c, 0x7284,
+ 0x72f0, 0x72f8,
+ 0x7300, 0x730c,
+ 0x731c, 0x7384,
+ 0x73f0, 0x73f8,
+ 0x7400, 0x7450,
0x7500, 0x7530,
- 0x7600, 0x761c,
+ 0x7600, 0x760c,
+ 0x7614, 0x761c,
0x7680, 0x76cc,
0x7700, 0x7798,
0x77c0, 0x77fc,
0x7900, 0x79fc,
- 0x7b00, 0x7c38,
- 0x7d00, 0x7efc,
- 0x8dc0, 0x8e1c,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c38,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d80,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7ea4,
+ 0x7eac, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8e04,
+ 0x8e10, 0x8e1c,
0x8e30, 0x8e78,
- 0x8ea0, 0x8f6c,
- 0x8fc0, 0x9074,
+ 0x8ea0, 0x8eb8,
+ 0x8ec0, 0x8f6c,
+ 0x8fc0, 0x9008,
+ 0x9010, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x9074,
0x90fc, 0x90fc,
- 0x9400, 0x9458,
- 0x9600, 0x96bc,
+ 0x9400, 0x9408,
+ 0x9410, 0x9458,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x96bc,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
@@ -716,23 +811,42 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0x9fec,
- 0xd004, 0xd03c,
+ 0xd004, 0xd004,
+ 0xd010, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0xea7c,
- 0xf000, 0x11110,
- 0x11118, 0x11190,
+ 0xf000, 0x11190,
0x19040, 0x1906c,
0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
+ 0x1908c, 0x190e4,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x1924c,
- 0x193f8, 0x19474,
- 0x19490, 0x194f8,
- 0x19800, 0x19f4c,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
+ 0x193f8, 0x1943c,
+ 0x1944c, 0x19474,
+ 0x19490, 0x194e0,
+ 0x194f0, 0x194f8,
+ 0x19800, 0x19c08,
+ 0x19c10, 0x19c90,
+ 0x19ca0, 0x19ce4,
+ 0x19cf0, 0x19d40,
+ 0x19d50, 0x19d94,
+ 0x19da0, 0x19de8,
+ 0x19df0, 0x19e40,
+ 0x19e50, 0x19e90,
+ 0x19ea0, 0x19f4c,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f4,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e040, 0x1e04c,
@@ -785,9 +899,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1ffc0, 0x1ffc8,
0x20000, 0x2002c,
0x20100, 0x2013c,
- 0x20190, 0x201c8,
+ 0x20190, 0x201a0,
+ 0x201a8, 0x201b8,
+ 0x201c4, 0x201c8,
0x20200, 0x20318,
- 0x20400, 0x20528,
+ 0x20400, 0x204b4,
+ 0x204c0, 0x20528,
0x20540, 0x20614,
0x21000, 0x21040,
0x2104c, 0x21060,
@@ -796,22 +913,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x21270, 0x21284,
0x212fc, 0x21388,
0x21400, 0x21404,
- 0x21500, 0x21518,
- 0x2152c, 0x2153c,
+ 0x21500, 0x21500,
+ 0x21510, 0x21518,
+ 0x2152c, 0x21530,
+ 0x2153c, 0x2153c,
0x21550, 0x21554,
0x21600, 0x21600,
- 0x21608, 0x21628,
- 0x21630, 0x2163c,
+ 0x21608, 0x2161c,
+ 0x21624, 0x21628,
+ 0x21630, 0x21634,
+ 0x2163c, 0x2163c,
0x21700, 0x2171c,
0x21780, 0x2178c,
- 0x21800, 0x21c38,
- 0x21c80, 0x21d7c,
+ 0x21800, 0x21818,
+ 0x21820, 0x21828,
+ 0x21830, 0x21848,
+ 0x21850, 0x21854,
+ 0x21860, 0x21868,
+ 0x21870, 0x21870,
+ 0x21878, 0x21898,
+ 0x218a0, 0x218a8,
+ 0x218b0, 0x218c8,
+ 0x218d0, 0x218d4,
+ 0x218e0, 0x218e8,
+ 0x218f0, 0x218f0,
+ 0x218f8, 0x21a18,
+ 0x21a20, 0x21a28,
+ 0x21a30, 0x21a48,
+ 0x21a50, 0x21a54,
+ 0x21a60, 0x21a68,
+ 0x21a70, 0x21a70,
+ 0x21a78, 0x21a98,
+ 0x21aa0, 0x21aa8,
+ 0x21ab0, 0x21ac8,
+ 0x21ad0, 0x21ad4,
+ 0x21ae0, 0x21ae8,
+ 0x21af0, 0x21af0,
+ 0x21af8, 0x21c18,
+ 0x21c20, 0x21c20,
+ 0x21c28, 0x21c30,
+ 0x21c38, 0x21c38,
+ 0x21c80, 0x21c98,
+ 0x21ca0, 0x21ca8,
+ 0x21cb0, 0x21cc8,
+ 0x21cd0, 0x21cd4,
+ 0x21ce0, 0x21ce8,
+ 0x21cf0, 0x21cf0,
+ 0x21cf8, 0x21d7c,
0x21e00, 0x21e04,
0x22000, 0x2202c,
0x22100, 0x2213c,
- 0x22190, 0x221c8,
+ 0x22190, 0x221a0,
+ 0x221a8, 0x221b8,
+ 0x221c4, 0x221c8,
0x22200, 0x22318,
- 0x22400, 0x22528,
+ 0x22400, 0x224b4,
+ 0x224c0, 0x22528,
0x22540, 0x22614,
0x23000, 0x23040,
0x2304c, 0x23060,
@@ -820,22 +977,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x23270, 0x23284,
0x232fc, 0x23388,
0x23400, 0x23404,
- 0x23500, 0x23518,
- 0x2352c, 0x2353c,
+ 0x23500, 0x23500,
+ 0x23510, 0x23518,
+ 0x2352c, 0x23530,
+ 0x2353c, 0x2353c,
0x23550, 0x23554,
0x23600, 0x23600,
- 0x23608, 0x23628,
- 0x23630, 0x2363c,
+ 0x23608, 0x2361c,
+ 0x23624, 0x23628,
+ 0x23630, 0x23634,
+ 0x2363c, 0x2363c,
0x23700, 0x2371c,
0x23780, 0x2378c,
- 0x23800, 0x23c38,
- 0x23c80, 0x23d7c,
+ 0x23800, 0x23818,
+ 0x23820, 0x23828,
+ 0x23830, 0x23848,
+ 0x23850, 0x23854,
+ 0x23860, 0x23868,
+ 0x23870, 0x23870,
+ 0x23878, 0x23898,
+ 0x238a0, 0x238a8,
+ 0x238b0, 0x238c8,
+ 0x238d0, 0x238d4,
+ 0x238e0, 0x238e8,
+ 0x238f0, 0x238f0,
+ 0x238f8, 0x23a18,
+ 0x23a20, 0x23a28,
+ 0x23a30, 0x23a48,
+ 0x23a50, 0x23a54,
+ 0x23a60, 0x23a68,
+ 0x23a70, 0x23a70,
+ 0x23a78, 0x23a98,
+ 0x23aa0, 0x23aa8,
+ 0x23ab0, 0x23ac8,
+ 0x23ad0, 0x23ad4,
+ 0x23ae0, 0x23ae8,
+ 0x23af0, 0x23af0,
+ 0x23af8, 0x23c18,
+ 0x23c20, 0x23c20,
+ 0x23c28, 0x23c30,
+ 0x23c38, 0x23c38,
+ 0x23c80, 0x23c98,
+ 0x23ca0, 0x23ca8,
+ 0x23cb0, 0x23cc8,
+ 0x23cd0, 0x23cd4,
+ 0x23ce0, 0x23ce8,
+ 0x23cf0, 0x23cf0,
+ 0x23cf8, 0x23d7c,
0x23e00, 0x23e04,
0x24000, 0x2402c,
0x24100, 0x2413c,
- 0x24190, 0x241c8,
+ 0x24190, 0x241a0,
+ 0x241a8, 0x241b8,
+ 0x241c4, 0x241c8,
0x24200, 0x24318,
- 0x24400, 0x24528,
+ 0x24400, 0x244b4,
+ 0x244c0, 0x24528,
0x24540, 0x24614,
0x25000, 0x25040,
0x2504c, 0x25060,
@@ -844,22 +1041,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x25270, 0x25284,
0x252fc, 0x25388,
0x25400, 0x25404,
- 0x25500, 0x25518,
- 0x2552c, 0x2553c,
+ 0x25500, 0x25500,
+ 0x25510, 0x25518,
+ 0x2552c, 0x25530,
+ 0x2553c, 0x2553c,
0x25550, 0x25554,
0x25600, 0x25600,
- 0x25608, 0x25628,
- 0x25630, 0x2563c,
+ 0x25608, 0x2561c,
+ 0x25624, 0x25628,
+ 0x25630, 0x25634,
+ 0x2563c, 0x2563c,
0x25700, 0x2571c,
0x25780, 0x2578c,
- 0x25800, 0x25c38,
- 0x25c80, 0x25d7c,
+ 0x25800, 0x25818,
+ 0x25820, 0x25828,
+ 0x25830, 0x25848,
+ 0x25850, 0x25854,
+ 0x25860, 0x25868,
+ 0x25870, 0x25870,
+ 0x25878, 0x25898,
+ 0x258a0, 0x258a8,
+ 0x258b0, 0x258c8,
+ 0x258d0, 0x258d4,
+ 0x258e0, 0x258e8,
+ 0x258f0, 0x258f0,
+ 0x258f8, 0x25a18,
+ 0x25a20, 0x25a28,
+ 0x25a30, 0x25a48,
+ 0x25a50, 0x25a54,
+ 0x25a60, 0x25a68,
+ 0x25a70, 0x25a70,
+ 0x25a78, 0x25a98,
+ 0x25aa0, 0x25aa8,
+ 0x25ab0, 0x25ac8,
+ 0x25ad0, 0x25ad4,
+ 0x25ae0, 0x25ae8,
+ 0x25af0, 0x25af0,
+ 0x25af8, 0x25c18,
+ 0x25c20, 0x25c20,
+ 0x25c28, 0x25c30,
+ 0x25c38, 0x25c38,
+ 0x25c80, 0x25c98,
+ 0x25ca0, 0x25ca8,
+ 0x25cb0, 0x25cc8,
+ 0x25cd0, 0x25cd4,
+ 0x25ce0, 0x25ce8,
+ 0x25cf0, 0x25cf0,
+ 0x25cf8, 0x25d7c,
0x25e00, 0x25e04,
0x26000, 0x2602c,
0x26100, 0x2613c,
- 0x26190, 0x261c8,
+ 0x26190, 0x261a0,
+ 0x261a8, 0x261b8,
+ 0x261c4, 0x261c8,
0x26200, 0x26318,
- 0x26400, 0x26528,
+ 0x26400, 0x264b4,
+ 0x264c0, 0x26528,
0x26540, 0x26614,
0x27000, 0x27040,
0x2704c, 0x27060,
@@ -868,51 +1105,120 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x27270, 0x27284,
0x272fc, 0x27388,
0x27400, 0x27404,
- 0x27500, 0x27518,
- 0x2752c, 0x2753c,
+ 0x27500, 0x27500,
+ 0x27510, 0x27518,
+ 0x2752c, 0x27530,
+ 0x2753c, 0x2753c,
0x27550, 0x27554,
0x27600, 0x27600,
- 0x27608, 0x27628,
- 0x27630, 0x2763c,
+ 0x27608, 0x2761c,
+ 0x27624, 0x27628,
+ 0x27630, 0x27634,
+ 0x2763c, 0x2763c,
0x27700, 0x2771c,
0x27780, 0x2778c,
- 0x27800, 0x27c38,
- 0x27c80, 0x27d7c,
+ 0x27800, 0x27818,
+ 0x27820, 0x27828,
+ 0x27830, 0x27848,
+ 0x27850, 0x27854,
+ 0x27860, 0x27868,
+ 0x27870, 0x27870,
+ 0x27878, 0x27898,
+ 0x278a0, 0x278a8,
+ 0x278b0, 0x278c8,
+ 0x278d0, 0x278d4,
+ 0x278e0, 0x278e8,
+ 0x278f0, 0x278f0,
+ 0x278f8, 0x27a18,
+ 0x27a20, 0x27a28,
+ 0x27a30, 0x27a48,
+ 0x27a50, 0x27a54,
+ 0x27a60, 0x27a68,
+ 0x27a70, 0x27a70,
+ 0x27a78, 0x27a98,
+ 0x27aa0, 0x27aa8,
+ 0x27ab0, 0x27ac8,
+ 0x27ad0, 0x27ad4,
+ 0x27ae0, 0x27ae8,
+ 0x27af0, 0x27af0,
+ 0x27af8, 0x27c18,
+ 0x27c20, 0x27c20,
+ 0x27c28, 0x27c30,
+ 0x27c38, 0x27c38,
+ 0x27c80, 0x27c98,
+ 0x27ca0, 0x27ca8,
+ 0x27cb0, 0x27cc8,
+ 0x27cd0, 0x27cd4,
+ 0x27ce0, 0x27ce8,
+ 0x27cf0, 0x27cf0,
+ 0x27cf8, 0x27d7c,
0x27e00, 0x27e04,
};
static const unsigned int t5_reg_ranges[] = {
- 0x1008, 0x1148,
- 0x1180, 0x11b4,
+ 0x1008, 0x10c0,
+ 0x10cc, 0x10f8,
+ 0x1100, 0x1100,
+ 0x110c, 0x1148,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
0x11fc, 0x123c,
0x1280, 0x173c,
0x1800, 0x18fc,
0x3000, 0x3028,
- 0x3068, 0x30d8,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
- 0x56cc, 0x575c,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
0x580c, 0x5814,
- 0x5890, 0x58bc,
- 0x5940, 0x59dc,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x59c8,
+ 0x59d0, 0x59dc,
0x59fc, 0x5a18,
- 0x5a60, 0x5a9c,
+ 0x5a60, 0x5a70,
+ 0x5a80, 0x5a9c,
0x5b94, 0x5bfc,
- 0x6000, 0x6040,
- 0x6058, 0x614c,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x614c,
0x7700, 0x7798,
0x77c0, 0x78fc,
- 0x7b00, 0x7c54,
- 0x7d00, 0x7efc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d80,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
0x8dc0, 0x8de0,
- 0x8df8, 0x8e84,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
0x8ea0, 0x8f84,
- 0x8fc0, 0x90f8,
- 0x9400, 0x9470,
- 0x9600, 0x96f4,
+ 0x8fc0, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9400, 0x9408,
+ 0x9410, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x96f4,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
@@ -924,103 +1230,143 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
- 0xd004, 0xd03c,
+ 0xd004, 0xd004,
+ 0xd010, 0xd03c,
0xdfc0, 0xdfe0,
- 0xe000, 0x11088,
- 0x1109c, 0x11110,
- 0x11118, 0x1117c,
+ 0xe000, 0x1106c,
+ 0x11074, 0x11088,
+ 0x1109c, 0x1117c,
0x11190, 0x11204,
0x19040, 0x1906c,
0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
0x191d0, 0x191e8,
0x19238, 0x19290,
- 0x193f8, 0x19474,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
- 0x19c00, 0x19c60,
- 0x19c94, 0x19e10,
- 0x19e50, 0x19f34,
+ 0x19c00, 0x19c08,
+ 0x19c10, 0x19c60,
+ 0x19c94, 0x19ce4,
+ 0x19cf0, 0x19d40,
+ 0x19d50, 0x19d94,
+ 0x19da0, 0x19de8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e90,
+ 0x19ea0, 0x19f24,
+ 0x19f34, 0x19f34,
0x19f40, 0x19f50,
- 0x19f90, 0x19fe4,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
+ 0x19f90, 0x19fb4,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
- 0x1e040, 0x1e04c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
- 0x1e440, 0x1e44c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
- 0x1e840, 0x1e84c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
- 0x1ec40, 0x1ec4c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
- 0x1f040, 0x1f04c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
- 0x1f440, 0x1f44c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
- 0x1f840, 0x1f84c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
- 0x1fc40, 0x1fc4c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
0x30000, 0x30030,
+ 0x30038, 0x30038,
+ 0x30040, 0x30040,
0x30100, 0x30144,
- 0x30190, 0x301d0,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
0x30200, 0x30318,
- 0x30400, 0x3052c,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
0x30540, 0x3061c,
- 0x30800, 0x30834,
+ 0x30800, 0x30828,
+ 0x30834, 0x30834,
0x308c0, 0x30908,
0x30910, 0x309ac,
- 0x30a00, 0x30a2c,
+ 0x30a00, 0x30a14,
+ 0x30a1c, 0x30a2c,
0x30a44, 0x30a50,
- 0x30a74, 0x30c24,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
0x30d00, 0x30d00,
0x30d08, 0x30d14,
0x30d1c, 0x30d20,
- 0x30d3c, 0x30d50,
+ 0x30d3c, 0x30d3c,
+ 0x30d48, 0x30d50,
0x31200, 0x3120c,
0x31220, 0x31220,
0x31240, 0x31240,
@@ -1040,27 +1386,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x322c8, 0x322fc,
0x32600, 0x32630,
0x32a00, 0x32abc,
- 0x32b00, 0x32b70,
- 0x33000, 0x33048,
- 0x33060, 0x3309c,
- 0x330f0, 0x33148,
- 0x33160, 0x3319c,
- 0x331f0, 0x332e4,
- 0x332f8, 0x333e4,
- 0x333f8, 0x33448,
- 0x33460, 0x3349c,
- 0x334f0, 0x33548,
- 0x33560, 0x3359c,
- 0x335f0, 0x336e4,
- 0x336f8, 0x337e4,
+ 0x32b00, 0x32b10,
+ 0x32b20, 0x32b30,
+ 0x32b40, 0x32b50,
+ 0x32b60, 0x32b70,
+ 0x33000, 0x33028,
+ 0x33030, 0x33048,
+ 0x33060, 0x33068,
+ 0x33070, 0x3309c,
+ 0x330f0, 0x33128,
+ 0x33130, 0x33148,
+ 0x33160, 0x33168,
+ 0x33170, 0x3319c,
+ 0x331f0, 0x33238,
+ 0x33240, 0x33240,
+ 0x33248, 0x33250,
+ 0x3325c, 0x33264,
+ 0x33270, 0x332b8,
+ 0x332c0, 0x332e4,
+ 0x332f8, 0x33338,
+ 0x33340, 0x33340,
+ 0x33348, 0x33350,
+ 0x3335c, 0x33364,
+ 0x33370, 0x333b8,
+ 0x333c0, 0x333e4,
+ 0x333f8, 0x33428,
+ 0x33430, 0x33448,
+ 0x33460, 0x33468,
+ 0x33470, 0x3349c,
+ 0x334f0, 0x33528,
+ 0x33530, 0x33548,
+ 0x33560, 0x33568,
+ 0x33570, 0x3359c,
+ 0x335f0, 0x33638,
+ 0x33640, 0x33640,
+ 0x33648, 0x33650,
+ 0x3365c, 0x33664,
+ 0x33670, 0x336b8,
+ 0x336c0, 0x336e4,
+ 0x336f8, 0x33738,
+ 0x33740, 0x33740,
+ 0x33748, 0x33750,
+ 0x3375c, 0x33764,
+ 0x33770, 0x337b8,
+ 0x337c0, 0x337e4,
0x337f8, 0x337fc,
0x33814, 0x33814,
0x3382c, 0x3382c,
0x33880, 0x3388c,
0x338e8, 0x338ec,
- 0x33900, 0x33948,
- 0x33960, 0x3399c,
- 0x339f0, 0x33ae4,
+ 0x33900, 0x33928,
+ 0x33930, 0x33948,
+ 0x33960, 0x33968,
+ 0x33970, 0x3399c,
+ 0x339f0, 0x33a38,
+ 0x33a40, 0x33a40,
+ 0x33a48, 0x33a50,
+ 0x33a5c, 0x33a64,
+ 0x33a70, 0x33ab8,
+ 0x33ac0, 0x33ae4,
0x33af8, 0x33b10,
0x33b28, 0x33b28,
0x33b3c, 0x33b50,
@@ -1069,21 +1453,32 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x33c3c, 0x33c50,
0x33cf0, 0x33cfc,
0x34000, 0x34030,
+ 0x34038, 0x34038,
+ 0x34040, 0x34040,
0x34100, 0x34144,
- 0x34190, 0x341d0,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
0x34200, 0x34318,
- 0x34400, 0x3452c,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
0x34540, 0x3461c,
- 0x34800, 0x34834,
+ 0x34800, 0x34828,
+ 0x34834, 0x34834,
0x348c0, 0x34908,
0x34910, 0x349ac,
- 0x34a00, 0x34a2c,
+ 0x34a00, 0x34a14,
+ 0x34a1c, 0x34a2c,
0x34a44, 0x34a50,
- 0x34a74, 0x34c24,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
0x34d00, 0x34d00,
0x34d08, 0x34d14,
0x34d1c, 0x34d20,
- 0x34d3c, 0x34d50,
+ 0x34d3c, 0x34d3c,
+ 0x34d48, 0x34d50,
0x35200, 0x3520c,
0x35220, 0x35220,
0x35240, 0x35240,
@@ -1103,27 +1498,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x362c8, 0x362fc,
0x36600, 0x36630,
0x36a00, 0x36abc,
- 0x36b00, 0x36b70,
- 0x37000, 0x37048,
- 0x37060, 0x3709c,
- 0x370f0, 0x37148,
- 0x37160, 0x3719c,
- 0x371f0, 0x372e4,
- 0x372f8, 0x373e4,
- 0x373f8, 0x37448,
- 0x37460, 0x3749c,
- 0x374f0, 0x37548,
- 0x37560, 0x3759c,
- 0x375f0, 0x376e4,
- 0x376f8, 0x377e4,
+ 0x36b00, 0x36b10,
+ 0x36b20, 0x36b30,
+ 0x36b40, 0x36b50,
+ 0x36b60, 0x36b70,
+ 0x37000, 0x37028,
+ 0x37030, 0x37048,
+ 0x37060, 0x37068,
+ 0x37070, 0x3709c,
+ 0x370f0, 0x37128,
+ 0x37130, 0x37148,
+ 0x37160, 0x37168,
+ 0x37170, 0x3719c,
+ 0x371f0, 0x37238,
+ 0x37240, 0x37240,
+ 0x37248, 0x37250,
+ 0x3725c, 0x37264,
+ 0x37270, 0x372b8,
+ 0x372c0, 0x372e4,
+ 0x372f8, 0x37338,
+ 0x37340, 0x37340,
+ 0x37348, 0x37350,
+ 0x3735c, 0x37364,
+ 0x37370, 0x373b8,
+ 0x373c0, 0x373e4,
+ 0x373f8, 0x37428,
+ 0x37430, 0x37448,
+ 0x37460, 0x37468,
+ 0x37470, 0x3749c,
+ 0x374f0, 0x37528,
+ 0x37530, 0x37548,
+ 0x37560, 0x37568,
+ 0x37570, 0x3759c,
+ 0x375f0, 0x37638,
+ 0x37640, 0x37640,
+ 0x37648, 0x37650,
+ 0x3765c, 0x37664,
+ 0x37670, 0x376b8,
+ 0x376c0, 0x376e4,
+ 0x376f8, 0x37738,
+ 0x37740, 0x37740,
+ 0x37748, 0x37750,
+ 0x3775c, 0x37764,
+ 0x37770, 0x377b8,
+ 0x377c0, 0x377e4,
0x377f8, 0x377fc,
0x37814, 0x37814,
0x3782c, 0x3782c,
0x37880, 0x3788c,
0x378e8, 0x378ec,
- 0x37900, 0x37948,
- 0x37960, 0x3799c,
- 0x379f0, 0x37ae4,
+ 0x37900, 0x37928,
+ 0x37930, 0x37948,
+ 0x37960, 0x37968,
+ 0x37970, 0x3799c,
+ 0x379f0, 0x37a38,
+ 0x37a40, 0x37a40,
+ 0x37a48, 0x37a50,
+ 0x37a5c, 0x37a64,
+ 0x37a70, 0x37ab8,
+ 0x37ac0, 0x37ae4,
0x37af8, 0x37b10,
0x37b28, 0x37b28,
0x37b3c, 0x37b50,
@@ -1132,21 +1565,32 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x37c3c, 0x37c50,
0x37cf0, 0x37cfc,
0x38000, 0x38030,
+ 0x38038, 0x38038,
+ 0x38040, 0x38040,
0x38100, 0x38144,
- 0x38190, 0x381d0,
+ 0x38190, 0x381a0,
+ 0x381a8, 0x381b8,
+ 0x381c4, 0x381c8,
+ 0x381d0, 0x381d0,
0x38200, 0x38318,
- 0x38400, 0x3852c,
+ 0x38400, 0x384b4,
+ 0x384c0, 0x3852c,
0x38540, 0x3861c,
- 0x38800, 0x38834,
+ 0x38800, 0x38828,
+ 0x38834, 0x38834,
0x388c0, 0x38908,
0x38910, 0x389ac,
- 0x38a00, 0x38a2c,
+ 0x38a00, 0x38a14,
+ 0x38a1c, 0x38a2c,
0x38a44, 0x38a50,
- 0x38a74, 0x38c24,
+ 0x38a74, 0x38a74,
+ 0x38a7c, 0x38afc,
+ 0x38b08, 0x38c24,
0x38d00, 0x38d00,
0x38d08, 0x38d14,
0x38d1c, 0x38d20,
- 0x38d3c, 0x38d50,
+ 0x38d3c, 0x38d3c,
+ 0x38d48, 0x38d50,
0x39200, 0x3920c,
0x39220, 0x39220,
0x39240, 0x39240,
@@ -1166,27 +1610,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3a2c8, 0x3a2fc,
0x3a600, 0x3a630,
0x3aa00, 0x3aabc,
- 0x3ab00, 0x3ab70,
- 0x3b000, 0x3b048,
- 0x3b060, 0x3b09c,
- 0x3b0f0, 0x3b148,
- 0x3b160, 0x3b19c,
- 0x3b1f0, 0x3b2e4,
- 0x3b2f8, 0x3b3e4,
- 0x3b3f8, 0x3b448,
- 0x3b460, 0x3b49c,
- 0x3b4f0, 0x3b548,
- 0x3b560, 0x3b59c,
- 0x3b5f0, 0x3b6e4,
- 0x3b6f8, 0x3b7e4,
+ 0x3ab00, 0x3ab10,
+ 0x3ab20, 0x3ab30,
+ 0x3ab40, 0x3ab50,
+ 0x3ab60, 0x3ab70,
+ 0x3b000, 0x3b028,
+ 0x3b030, 0x3b048,
+ 0x3b060, 0x3b068,
+ 0x3b070, 0x3b09c,
+ 0x3b0f0, 0x3b128,
+ 0x3b130, 0x3b148,
+ 0x3b160, 0x3b168,
+ 0x3b170, 0x3b19c,
+ 0x3b1f0, 0x3b238,
+ 0x3b240, 0x3b240,
+ 0x3b248, 0x3b250,
+ 0x3b25c, 0x3b264,
+ 0x3b270, 0x3b2b8,
+ 0x3b2c0, 0x3b2e4,
+ 0x3b2f8, 0x3b338,
+ 0x3b340, 0x3b340,
+ 0x3b348, 0x3b350,
+ 0x3b35c, 0x3b364,
+ 0x3b370, 0x3b3b8,
+ 0x3b3c0, 0x3b3e4,
+ 0x3b3f8, 0x3b428,
+ 0x3b430, 0x3b448,
+ 0x3b460, 0x3b468,
+ 0x3b470, 0x3b49c,
+ 0x3b4f0, 0x3b528,
+ 0x3b530, 0x3b548,
+ 0x3b560, 0x3b568,
+ 0x3b570, 0x3b59c,
+ 0x3b5f0, 0x3b638,
+ 0x3b640, 0x3b640,
+ 0x3b648, 0x3b650,
+ 0x3b65c, 0x3b664,
+ 0x3b670, 0x3b6b8,
+ 0x3b6c0, 0x3b6e4,
+ 0x3b6f8, 0x3b738,
+ 0x3b740, 0x3b740,
+ 0x3b748, 0x3b750,
+ 0x3b75c, 0x3b764,
+ 0x3b770, 0x3b7b8,
+ 0x3b7c0, 0x3b7e4,
0x3b7f8, 0x3b7fc,
0x3b814, 0x3b814,
0x3b82c, 0x3b82c,
0x3b880, 0x3b88c,
0x3b8e8, 0x3b8ec,
- 0x3b900, 0x3b948,
- 0x3b960, 0x3b99c,
- 0x3b9f0, 0x3bae4,
+ 0x3b900, 0x3b928,
+ 0x3b930, 0x3b948,
+ 0x3b960, 0x3b968,
+ 0x3b970, 0x3b99c,
+ 0x3b9f0, 0x3ba38,
+ 0x3ba40, 0x3ba40,
+ 0x3ba48, 0x3ba50,
+ 0x3ba5c, 0x3ba64,
+ 0x3ba70, 0x3bab8,
+ 0x3bac0, 0x3bae4,
0x3baf8, 0x3bb10,
0x3bb28, 0x3bb28,
0x3bb3c, 0x3bb50,
@@ -1195,21 +1677,32 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3bc3c, 0x3bc50,
0x3bcf0, 0x3bcfc,
0x3c000, 0x3c030,
+ 0x3c038, 0x3c038,
+ 0x3c040, 0x3c040,
0x3c100, 0x3c144,
- 0x3c190, 0x3c1d0,
+ 0x3c190, 0x3c1a0,
+ 0x3c1a8, 0x3c1b8,
+ 0x3c1c4, 0x3c1c8,
+ 0x3c1d0, 0x3c1d0,
0x3c200, 0x3c318,
- 0x3c400, 0x3c52c,
+ 0x3c400, 0x3c4b4,
+ 0x3c4c0, 0x3c52c,
0x3c540, 0x3c61c,
- 0x3c800, 0x3c834,
+ 0x3c800, 0x3c828,
+ 0x3c834, 0x3c834,
0x3c8c0, 0x3c908,
0x3c910, 0x3c9ac,
- 0x3ca00, 0x3ca2c,
+ 0x3ca00, 0x3ca14,
+ 0x3ca1c, 0x3ca2c,
0x3ca44, 0x3ca50,
- 0x3ca74, 0x3cc24,
+ 0x3ca74, 0x3ca74,
+ 0x3ca7c, 0x3cafc,
+ 0x3cb08, 0x3cc24,
0x3cd00, 0x3cd00,
0x3cd08, 0x3cd14,
0x3cd1c, 0x3cd20,
- 0x3cd3c, 0x3cd50,
+ 0x3cd3c, 0x3cd3c,
+ 0x3cd48, 0x3cd50,
0x3d200, 0x3d20c,
0x3d220, 0x3d220,
0x3d240, 0x3d240,
@@ -1229,27 +1722,65 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3e2c8, 0x3e2fc,
0x3e600, 0x3e630,
0x3ea00, 0x3eabc,
- 0x3eb00, 0x3eb70,
- 0x3f000, 0x3f048,
- 0x3f060, 0x3f09c,
- 0x3f0f0, 0x3f148,
- 0x3f160, 0x3f19c,
- 0x3f1f0, 0x3f2e4,
- 0x3f2f8, 0x3f3e4,
- 0x3f3f8, 0x3f448,
- 0x3f460, 0x3f49c,
- 0x3f4f0, 0x3f548,
- 0x3f560, 0x3f59c,
- 0x3f5f0, 0x3f6e4,
- 0x3f6f8, 0x3f7e4,
+ 0x3eb00, 0x3eb10,
+ 0x3eb20, 0x3eb30,
+ 0x3eb40, 0x3eb50,
+ 0x3eb60, 0x3eb70,
+ 0x3f000, 0x3f028,
+ 0x3f030, 0x3f048,
+ 0x3f060, 0x3f068,
+ 0x3f070, 0x3f09c,
+ 0x3f0f0, 0x3f128,
+ 0x3f130, 0x3f148,
+ 0x3f160, 0x3f168,
+ 0x3f170, 0x3f19c,
+ 0x3f1f0, 0x3f238,
+ 0x3f240, 0x3f240,
+ 0x3f248, 0x3f250,
+ 0x3f25c, 0x3f264,
+ 0x3f270, 0x3f2b8,
+ 0x3f2c0, 0x3f2e4,
+ 0x3f2f8, 0x3f338,
+ 0x3f340, 0x3f340,
+ 0x3f348, 0x3f350,
+ 0x3f35c, 0x3f364,
+ 0x3f370, 0x3f3b8,
+ 0x3f3c0, 0x3f3e4,
+ 0x3f3f8, 0x3f428,
+ 0x3f430, 0x3f448,
+ 0x3f460, 0x3f468,
+ 0x3f470, 0x3f49c,
+ 0x3f4f0, 0x3f528,
+ 0x3f530, 0x3f548,
+ 0x3f560, 0x3f568,
+ 0x3f570, 0x3f59c,
+ 0x3f5f0, 0x3f638,
+ 0x3f640, 0x3f640,
+ 0x3f648, 0x3f650,
+ 0x3f65c, 0x3f664,
+ 0x3f670, 0x3f6b8,
+ 0x3f6c0, 0x3f6e4,
+ 0x3f6f8, 0x3f738,
+ 0x3f740, 0x3f740,
+ 0x3f748, 0x3f750,
+ 0x3f75c, 0x3f764,
+ 0x3f770, 0x3f7b8,
+ 0x3f7c0, 0x3f7e4,
0x3f7f8, 0x3f7fc,
0x3f814, 0x3f814,
0x3f82c, 0x3f82c,
0x3f880, 0x3f88c,
0x3f8e8, 0x3f8ec,
- 0x3f900, 0x3f948,
- 0x3f960, 0x3f99c,
- 0x3f9f0, 0x3fae4,
+ 0x3f900, 0x3f928,
+ 0x3f930, 0x3f948,
+ 0x3f960, 0x3f968,
+ 0x3f970, 0x3f99c,
+ 0x3f9f0, 0x3fa38,
+ 0x3fa40, 0x3fa40,
+ 0x3fa48, 0x3fa50,
+ 0x3fa5c, 0x3fa64,
+ 0x3fa70, 0x3fab8,
+ 0x3fac0, 0x3fae4,
0x3faf8, 0x3fb10,
0x3fb28, 0x3fb28,
0x3fb3c, 0x3fb50,
@@ -1258,107 +1789,224 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3fc3c, 0x3fc50,
0x3fcf0, 0x3fcfc,
0x40000, 0x4000c,
- 0x40040, 0x40068,
- 0x4007c, 0x40144,
+ 0x40040, 0x40050,
+ 0x40060, 0x40068,
+ 0x4007c, 0x4008c,
+ 0x40094, 0x400b0,
+ 0x400c0, 0x40144,
0x40180, 0x4018c,
- 0x40200, 0x40298,
- 0x402ac, 0x4033c,
+ 0x40200, 0x40254,
+ 0x40260, 0x40264,
+ 0x40270, 0x40288,
+ 0x40290, 0x40298,
+ 0x402ac, 0x402c8,
+ 0x402d0, 0x402e0,
+ 0x402f0, 0x402f0,
+ 0x40300, 0x4033c,
0x403f8, 0x403fc,
0x41304, 0x413c4,
- 0x41400, 0x4141c,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
0x41480, 0x414d0,
- 0x44000, 0x44078,
- 0x440c0, 0x44278,
- 0x442c0, 0x44478,
- 0x444c0, 0x44678,
- 0x446c0, 0x44878,
- 0x448c0, 0x449fc,
- 0x45000, 0x45068,
+ 0x44000, 0x44054,
+ 0x4405c, 0x44078,
+ 0x440c0, 0x44174,
+ 0x44180, 0x441ac,
+ 0x441b4, 0x441b8,
+ 0x441c0, 0x44254,
+ 0x4425c, 0x44278,
+ 0x442c0, 0x44374,
+ 0x44380, 0x443ac,
+ 0x443b4, 0x443b8,
+ 0x443c0, 0x44454,
+ 0x4445c, 0x44478,
+ 0x444c0, 0x44574,
+ 0x44580, 0x445ac,
+ 0x445b4, 0x445b8,
+ 0x445c0, 0x44654,
+ 0x4465c, 0x44678,
+ 0x446c0, 0x44774,
+ 0x44780, 0x447ac,
+ 0x447b4, 0x447b8,
+ 0x447c0, 0x44854,
+ 0x4485c, 0x44878,
+ 0x448c0, 0x44974,
+ 0x44980, 0x449ac,
+ 0x449b4, 0x449b8,
+ 0x449c0, 0x449fc,
+ 0x45000, 0x45004,
+ 0x45010, 0x45030,
+ 0x45040, 0x45060,
+ 0x45068, 0x45068,
0x45080, 0x45084,
0x450a0, 0x450b0,
- 0x45200, 0x45268,
+ 0x45200, 0x45204,
+ 0x45210, 0x45230,
+ 0x45240, 0x45260,
+ 0x45268, 0x45268,
0x45280, 0x45284,
0x452a0, 0x452b0,
0x460c0, 0x460e4,
- 0x47000, 0x4708c,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
0x47200, 0x47250,
- 0x47400, 0x47420,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
0x47600, 0x47618,
0x47800, 0x47814,
0x48000, 0x4800c,
- 0x48040, 0x48068,
- 0x4807c, 0x48144,
+ 0x48040, 0x48050,
+ 0x48060, 0x48068,
+ 0x4807c, 0x4808c,
+ 0x48094, 0x480b0,
+ 0x480c0, 0x48144,
0x48180, 0x4818c,
- 0x48200, 0x48298,
- 0x482ac, 0x4833c,
+ 0x48200, 0x48254,
+ 0x48260, 0x48264,
+ 0x48270, 0x48288,
+ 0x48290, 0x48298,
+ 0x482ac, 0x482c8,
+ 0x482d0, 0x482e0,
+ 0x482f0, 0x482f0,
+ 0x48300, 0x4833c,
0x483f8, 0x483fc,
0x49304, 0x493c4,
- 0x49400, 0x4941c,
+ 0x49400, 0x4940c,
+ 0x49414, 0x4941c,
0x49480, 0x494d0,
- 0x4c000, 0x4c078,
- 0x4c0c0, 0x4c278,
- 0x4c2c0, 0x4c478,
- 0x4c4c0, 0x4c678,
- 0x4c6c0, 0x4c878,
- 0x4c8c0, 0x4c9fc,
- 0x4d000, 0x4d068,
+ 0x4c000, 0x4c054,
+ 0x4c05c, 0x4c078,
+ 0x4c0c0, 0x4c174,
+ 0x4c180, 0x4c1ac,
+ 0x4c1b4, 0x4c1b8,
+ 0x4c1c0, 0x4c254,
+ 0x4c25c, 0x4c278,
+ 0x4c2c0, 0x4c374,
+ 0x4c380, 0x4c3ac,
+ 0x4c3b4, 0x4c3b8,
+ 0x4c3c0, 0x4c454,
+ 0x4c45c, 0x4c478,
+ 0x4c4c0, 0x4c574,
+ 0x4c580, 0x4c5ac,
+ 0x4c5b4, 0x4c5b8,
+ 0x4c5c0, 0x4c654,
+ 0x4c65c, 0x4c678,
+ 0x4c6c0, 0x4c774,
+ 0x4c780, 0x4c7ac,
+ 0x4c7b4, 0x4c7b8,
+ 0x4c7c0, 0x4c854,
+ 0x4c85c, 0x4c878,
+ 0x4c8c0, 0x4c974,
+ 0x4c980, 0x4c9ac,
+ 0x4c9b4, 0x4c9b8,
+ 0x4c9c0, 0x4c9fc,
+ 0x4d000, 0x4d004,
+ 0x4d010, 0x4d030,
+ 0x4d040, 0x4d060,
+ 0x4d068, 0x4d068,
0x4d080, 0x4d084,
0x4d0a0, 0x4d0b0,
- 0x4d200, 0x4d268,
+ 0x4d200, 0x4d204,
+ 0x4d210, 0x4d230,
+ 0x4d240, 0x4d260,
+ 0x4d268, 0x4d268,
0x4d280, 0x4d284,
0x4d2a0, 0x4d2b0,
0x4e0c0, 0x4e0e4,
- 0x4f000, 0x4f08c,
+ 0x4f000, 0x4f03c,
+ 0x4f044, 0x4f08c,
0x4f200, 0x4f250,
- 0x4f400, 0x4f420,
+ 0x4f400, 0x4f408,
+ 0x4f414, 0x4f420,
0x4f600, 0x4f618,
0x4f800, 0x4f814,
- 0x50000, 0x500cc,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
0x50400, 0x50400,
- 0x50800, 0x508cc,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
0x50c00, 0x50c00,
0x51000, 0x5101c,
0x51300, 0x51308,
};
static const unsigned int t6_reg_ranges[] = {
- 0x1008, 0x114c,
- 0x1180, 0x11b4,
- 0x11fc, 0x1250,
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x114c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x1254,
0x1280, 0x133c,
0x1800, 0x18fc,
0x3000, 0x302c,
- 0x3060, 0x30d8,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
0x35ec, 0x35ec,
0x3600, 0x5624,
- 0x56cc, 0x575c,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
0x580c, 0x5814,
- 0x5890, 0x58bc,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
0x5940, 0x595c,
0x5980, 0x598c,
- 0x59b0, 0x59dc,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a6c,
- 0x5a80, 0x5a9c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
0x5b94, 0x5bfc,
- 0x5c10, 0x5ec0,
- 0x5ec8, 0x5ec8,
- 0x6000, 0x6040,
- 0x6058, 0x6154,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ecc,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x619c,
0x7700, 0x7798,
0x77c0, 0x7880,
0x78cc, 0x78fc,
- 0x7b00, 0x7c54,
- 0x7d00, 0x7efc,
- 0x8dc0, 0x8de0,
- 0x8df8, 0x8e84,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d84,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de4,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
0x8ea0, 0x8f88,
- 0x8fb8, 0x911c,
+ 0x8fb8, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9100, 0x9124,
0x9400, 0x9470,
- 0x9600, 0x971c,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
0x9800, 0x9808,
0x9820, 0x983c,
0x9850, 0x9864,
@@ -1371,110 +2019,171 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9f00, 0x9f6c,
0x9f80, 0xa020,
0xd004, 0xd03c,
+ 0xd100, 0xd118,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
0xdfc0, 0xdfe0,
0xe000, 0xf008,
0x11000, 0x11014,
- 0x11048, 0x11110,
- 0x11118, 0x1117c,
- 0x11190, 0x11260,
+ 0x11048, 0x1106c,
+ 0x11074, 0x11088,
+ 0x11098, 0x11120,
+ 0x1112c, 0x1117c,
+ 0x11190, 0x112e0,
0x11300, 0x1130c,
- 0x12000, 0x1205c,
+ 0x12000, 0x1206c,
0x19040, 0x1906c,
0x19078, 0x19080,
- 0x1908c, 0x19124,
- 0x19150, 0x191b0,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
0x191d0, 0x191e8,
- 0x19238, 0x192b8,
- 0x193f8, 0x19474,
+ 0x19238, 0x192b0,
+ 0x192bc, 0x192bc,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19418,
+ 0x19420, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
0x19490, 0x194cc,
0x194f0, 0x194f8,
- 0x19c00, 0x19c80,
- 0x19c94, 0x19cbc,
- 0x19ce4, 0x19d28,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cbc,
+ 0x19ce4, 0x19ce4,
+ 0x19cf0, 0x19cf8,
+ 0x19d00, 0x19d28,
0x19d50, 0x19d78,
- 0x19d94, 0x19dc8,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19dc8,
0x19df0, 0x19e10,
0x19e50, 0x19e6c,
- 0x19ea0, 0x19f34,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
0x19f40, 0x19f50,
0x19f90, 0x19fac,
- 0x19fc4, 0x19fe4,
- 0x1a000, 0x1a06c,
- 0x1a0b0, 0x1a120,
- 0x1a128, 0x1a138,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
0x1a190, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
- 0x1e040, 0x1e04c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
0x1e284, 0x1e290,
0x1e2c0, 0x1e2c0,
0x1e2e0, 0x1e2e0,
0x1e300, 0x1e384,
0x1e3c0, 0x1e3c8,
0x1e408, 0x1e40c,
- 0x1e440, 0x1e44c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
0x1e684, 0x1e690,
0x1e6c0, 0x1e6c0,
0x1e6e0, 0x1e6e0,
0x1e700, 0x1e784,
0x1e7c0, 0x1e7c8,
0x1e808, 0x1e80c,
- 0x1e840, 0x1e84c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
0x1ea84, 0x1ea90,
0x1eac0, 0x1eac0,
0x1eae0, 0x1eae0,
0x1eb00, 0x1eb84,
0x1ebc0, 0x1ebc8,
0x1ec08, 0x1ec0c,
- 0x1ec40, 0x1ec4c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
0x1ee84, 0x1ee90,
0x1eec0, 0x1eec0,
0x1eee0, 0x1eee0,
0x1ef00, 0x1ef84,
0x1efc0, 0x1efc8,
0x1f008, 0x1f00c,
- 0x1f040, 0x1f04c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
0x1f284, 0x1f290,
0x1f2c0, 0x1f2c0,
0x1f2e0, 0x1f2e0,
0x1f300, 0x1f384,
0x1f3c0, 0x1f3c8,
0x1f408, 0x1f40c,
- 0x1f440, 0x1f44c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
0x1f684, 0x1f690,
0x1f6c0, 0x1f6c0,
0x1f6e0, 0x1f6e0,
0x1f700, 0x1f784,
0x1f7c0, 0x1f7c8,
0x1f808, 0x1f80c,
- 0x1f840, 0x1f84c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
0x1fa84, 0x1fa90,
0x1fac0, 0x1fac0,
0x1fae0, 0x1fae0,
0x1fb00, 0x1fb84,
0x1fbc0, 0x1fbc8,
0x1fc08, 0x1fc0c,
- 0x1fc40, 0x1fc4c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
0x1fe84, 0x1fe90,
0x1fec0, 0x1fec0,
0x1fee0, 0x1fee0,
0x1ff00, 0x1ff84,
0x1ffc0, 0x1ffc8,
- 0x30000, 0x30070,
- 0x30100, 0x3015c,
- 0x30190, 0x301d0,
- 0x30200, 0x30318,
- 0x30400, 0x3052c,
+ 0x30000, 0x30030,
+ 0x30038, 0x30038,
+ 0x30040, 0x30040,
+ 0x30048, 0x30048,
+ 0x30050, 0x30050,
+ 0x3005c, 0x30060,
+ 0x30068, 0x30068,
+ 0x30070, 0x30070,
+ 0x30100, 0x30168,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30320,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
0x30540, 0x3061c,
- 0x30800, 0x3088c,
+ 0x30800, 0x308a0,
0x308c0, 0x30908,
0x30910, 0x309b8,
0x30a00, 0x30a04,
- 0x30a0c, 0x30a2c,
+ 0x30a0c, 0x30a14,
+ 0x30a1c, 0x30a2c,
0x30a44, 0x30a50,
- 0x30a74, 0x30c24,
- 0x30d00, 0x30d3c,
- 0x30d44, 0x30d7c,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d14,
+ 0x30d1c, 0x30d3c,
+ 0x30d44, 0x30d4c,
+ 0x30d54, 0x30d74,
+ 0x30d7c, 0x30d7c,
0x30de0, 0x30de0,
0x30e00, 0x30ed4,
0x30f00, 0x30fa4,
@@ -1503,7 +2212,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x31bb0, 0x31bb4,
0x31bc8, 0x31bd4,
0x32140, 0x3218c,
- 0x321f0, 0x32200,
+ 0x321f0, 0x321f4,
+ 0x32200, 0x32200,
0x32218, 0x32218,
0x32400, 0x32400,
0x32408, 0x3241c,
@@ -1512,47 +2222,108 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x326a8, 0x326a8,
0x326ec, 0x326ec,
0x32a00, 0x32abc,
- 0x32b00, 0x32b78,
+ 0x32b00, 0x32b38,
+ 0x32b40, 0x32b58,
+ 0x32b60, 0x32b78,
0x32c00, 0x32c00,
0x32c08, 0x32c3c,
0x32e00, 0x32e2c,
0x32f00, 0x32f2c,
- 0x33000, 0x330ac,
- 0x330c0, 0x331ac,
- 0x331c0, 0x332c4,
- 0x332e4, 0x333c4,
- 0x333e4, 0x334ac,
- 0x334c0, 0x335ac,
- 0x335c0, 0x336c4,
- 0x336e4, 0x337c4,
+ 0x33000, 0x3302c,
+ 0x33034, 0x33050,
+ 0x33058, 0x33058,
+ 0x33060, 0x3308c,
+ 0x3309c, 0x330ac,
+ 0x330c0, 0x330c0,
+ 0x330c8, 0x330d0,
+ 0x330d8, 0x330e0,
+ 0x330ec, 0x3312c,
+ 0x33134, 0x33150,
+ 0x33158, 0x33158,
+ 0x33160, 0x3318c,
+ 0x3319c, 0x331ac,
+ 0x331c0, 0x331c0,
+ 0x331c8, 0x331d0,
+ 0x331d8, 0x331e0,
+ 0x331ec, 0x33290,
+ 0x33298, 0x332c4,
+ 0x332e4, 0x33390,
+ 0x33398, 0x333c4,
+ 0x333e4, 0x3342c,
+ 0x33434, 0x33450,
+ 0x33458, 0x33458,
+ 0x33460, 0x3348c,
+ 0x3349c, 0x334ac,
+ 0x334c0, 0x334c0,
+ 0x334c8, 0x334d0,
+ 0x334d8, 0x334e0,
+ 0x334ec, 0x3352c,
+ 0x33534, 0x33550,
+ 0x33558, 0x33558,
+ 0x33560, 0x3358c,
+ 0x3359c, 0x335ac,
+ 0x335c0, 0x335c0,
+ 0x335c8, 0x335d0,
+ 0x335d8, 0x335e0,
+ 0x335ec, 0x33690,
+ 0x33698, 0x336c4,
+ 0x336e4, 0x33790,
+ 0x33798, 0x337c4,
0x337e4, 0x337fc,
0x33814, 0x33814,
0x33854, 0x33868,
0x33880, 0x3388c,
0x338c0, 0x338d0,
0x338e8, 0x338ec,
- 0x33900, 0x339ac,
- 0x339c0, 0x33ac4,
+ 0x33900, 0x3392c,
+ 0x33934, 0x33950,
+ 0x33958, 0x33958,
+ 0x33960, 0x3398c,
+ 0x3399c, 0x339ac,
+ 0x339c0, 0x339c0,
+ 0x339c8, 0x339d0,
+ 0x339d8, 0x339e0,
+ 0x339ec, 0x33a90,
+ 0x33a98, 0x33ac4,
0x33ae4, 0x33b10,
- 0x33b24, 0x33b50,
+ 0x33b24, 0x33b28,
+ 0x33b38, 0x33b50,
0x33bf0, 0x33c10,
- 0x33c24, 0x33c50,
+ 0x33c24, 0x33c28,
+ 0x33c38, 0x33c50,
0x33cf0, 0x33cfc,
- 0x34000, 0x34070,
- 0x34100, 0x3415c,
- 0x34190, 0x341d0,
- 0x34200, 0x34318,
- 0x34400, 0x3452c,
+ 0x34000, 0x34030,
+ 0x34038, 0x34038,
+ 0x34040, 0x34040,
+ 0x34048, 0x34048,
+ 0x34050, 0x34050,
+ 0x3405c, 0x34060,
+ 0x34068, 0x34068,
+ 0x34070, 0x34070,
+ 0x34100, 0x34168,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34320,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
0x34540, 0x3461c,
- 0x34800, 0x3488c,
+ 0x34800, 0x348a0,
0x348c0, 0x34908,
0x34910, 0x349b8,
0x34a00, 0x34a04,
- 0x34a0c, 0x34a2c,
+ 0x34a0c, 0x34a14,
+ 0x34a1c, 0x34a2c,
0x34a44, 0x34a50,
- 0x34a74, 0x34c24,
- 0x34d00, 0x34d3c,
- 0x34d44, 0x34d7c,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d14,
+ 0x34d1c, 0x34d3c,
+ 0x34d44, 0x34d4c,
+ 0x34d54, 0x34d74,
+ 0x34d7c, 0x34d7c,
0x34de0, 0x34de0,
0x34e00, 0x34ed4,
0x34f00, 0x34fa4,
@@ -1581,7 +2352,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x35bb0, 0x35bb4,
0x35bc8, 0x35bd4,
0x36140, 0x3618c,
- 0x361f0, 0x36200,
+ 0x361f0, 0x361f4,
+ 0x36200, 0x36200,
0x36218, 0x36218,
0x36400, 0x36400,
0x36408, 0x3641c,
@@ -1590,31 +2362,75 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x366a8, 0x366a8,
0x366ec, 0x366ec,
0x36a00, 0x36abc,
- 0x36b00, 0x36b78,
+ 0x36b00, 0x36b38,
+ 0x36b40, 0x36b58,
+ 0x36b60, 0x36b78,
0x36c00, 0x36c00,
0x36c08, 0x36c3c,
0x36e00, 0x36e2c,
0x36f00, 0x36f2c,
- 0x37000, 0x370ac,
- 0x370c0, 0x371ac,
- 0x371c0, 0x372c4,
- 0x372e4, 0x373c4,
- 0x373e4, 0x374ac,
- 0x374c0, 0x375ac,
- 0x375c0, 0x376c4,
- 0x376e4, 0x377c4,
+ 0x37000, 0x3702c,
+ 0x37034, 0x37050,
+ 0x37058, 0x37058,
+ 0x37060, 0x3708c,
+ 0x3709c, 0x370ac,
+ 0x370c0, 0x370c0,
+ 0x370c8, 0x370d0,
+ 0x370d8, 0x370e0,
+ 0x370ec, 0x3712c,
+ 0x37134, 0x37150,
+ 0x37158, 0x37158,
+ 0x37160, 0x3718c,
+ 0x3719c, 0x371ac,
+ 0x371c0, 0x371c0,
+ 0x371c8, 0x371d0,
+ 0x371d8, 0x371e0,
+ 0x371ec, 0x37290,
+ 0x37298, 0x372c4,
+ 0x372e4, 0x37390,
+ 0x37398, 0x373c4,
+ 0x373e4, 0x3742c,
+ 0x37434, 0x37450,
+ 0x37458, 0x37458,
+ 0x37460, 0x3748c,
+ 0x3749c, 0x374ac,
+ 0x374c0, 0x374c0,
+ 0x374c8, 0x374d0,
+ 0x374d8, 0x374e0,
+ 0x374ec, 0x3752c,
+ 0x37534, 0x37550,
+ 0x37558, 0x37558,
+ 0x37560, 0x3758c,
+ 0x3759c, 0x375ac,
+ 0x375c0, 0x375c0,
+ 0x375c8, 0x375d0,
+ 0x375d8, 0x375e0,
+ 0x375ec, 0x37690,
+ 0x37698, 0x376c4,
+ 0x376e4, 0x37790,
+ 0x37798, 0x377c4,
0x377e4, 0x377fc,
0x37814, 0x37814,
0x37854, 0x37868,
0x37880, 0x3788c,
0x378c0, 0x378d0,
0x378e8, 0x378ec,
- 0x37900, 0x379ac,
- 0x379c0, 0x37ac4,
+ 0x37900, 0x3792c,
+ 0x37934, 0x37950,
+ 0x37958, 0x37958,
+ 0x37960, 0x3798c,
+ 0x3799c, 0x379ac,
+ 0x379c0, 0x379c0,
+ 0x379c8, 0x379d0,
+ 0x379d8, 0x379e0,
+ 0x379ec, 0x37a90,
+ 0x37a98, 0x37ac4,
0x37ae4, 0x37b10,
- 0x37b24, 0x37b50,
+ 0x37b24, 0x37b28,
+ 0x37b38, 0x37b50,
0x37bf0, 0x37c10,
- 0x37c24, 0x37c50,
+ 0x37c24, 0x37c28,
+ 0x37c38, 0x37c50,
0x37cf0, 0x37cfc,
0x40040, 0x40040,
0x40080, 0x40084,
@@ -1626,36 +2442,62 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x40280, 0x40280,
0x40304, 0x40304,
0x40330, 0x4033c,
- 0x41304, 0x413dc,
- 0x41400, 0x4141c,
+ 0x41304, 0x413c8,
+ 0x413d0, 0x413dc,
+ 0x413f0, 0x413f0,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x4407c,
- 0x440c0, 0x4427c,
- 0x442c0, 0x4447c,
- 0x444c0, 0x4467c,
- 0x446c0, 0x4487c,
- 0x448c0, 0x44a7c,
- 0x44ac0, 0x44c7c,
- 0x44cc0, 0x44e7c,
- 0x44ec0, 0x4507c,
- 0x450c0, 0x451fc,
- 0x45800, 0x45868,
+ 0x440c0, 0x441ac,
+ 0x441b4, 0x4427c,
+ 0x442c0, 0x443ac,
+ 0x443b4, 0x4447c,
+ 0x444c0, 0x445ac,
+ 0x445b4, 0x4467c,
+ 0x446c0, 0x447ac,
+ 0x447b4, 0x4487c,
+ 0x448c0, 0x449ac,
+ 0x449b4, 0x44a7c,
+ 0x44ac0, 0x44bac,
+ 0x44bb4, 0x44c7c,
+ 0x44cc0, 0x44dac,
+ 0x44db4, 0x44e7c,
+ 0x44ec0, 0x44fac,
+ 0x44fb4, 0x4507c,
+ 0x450c0, 0x451ac,
+ 0x451b4, 0x451fc,
+ 0x45800, 0x45804,
+ 0x45810, 0x45830,
+ 0x45840, 0x45860,
+ 0x45868, 0x45868,
0x45880, 0x45884,
0x458a0, 0x458b0,
- 0x45a00, 0x45a68,
+ 0x45a00, 0x45a04,
+ 0x45a10, 0x45a30,
+ 0x45a40, 0x45a60,
+ 0x45a68, 0x45a68,
0x45a80, 0x45a84,
0x45aa0, 0x45ab0,
0x460c0, 0x460e4,
- 0x47000, 0x4708c,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
0x47200, 0x47250,
- 0x47400, 0x47420,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
0x47600, 0x47618,
- 0x47800, 0x4782c,
- 0x50000, 0x500cc,
+ 0x47800, 0x47814,
+ 0x47820, 0x4782c,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
0x50400, 0x50400,
- 0x50800, 0x508cc,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
0x50c00, 0x50c00,
- 0x51000, 0x510b0,
+ 0x51000, 0x51020,
+ 0x51028, 0x510b0,
0x51300, 0x51324,
};
@@ -2129,6 +2971,65 @@ int t4_get_exprom_version(struct adapter *adap, u32 *vers)
return 0;
}
+/**
+ * t4_check_fw_version - check if the FW is supported by this driver
+ * @adap: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if the versions are compatible, a negative error if the version could
+ * not be read or is older than the minimum supported version.
+ */
+int t4_check_fw_version(struct adapter *adap)
+{
+ int i, ret, major, minor, micro;
+ int exp_major, exp_minor, exp_micro;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+ /* Try multiple times before returning error */
+ for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
+ ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+
+ if (ret)
+ return ret;
+
+ major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
+ minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
+ micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ exp_major = T4FW_MIN_VERSION_MAJOR;
+ exp_minor = T4FW_MIN_VERSION_MINOR;
+ exp_micro = T4FW_MIN_VERSION_MICRO;
+ break;
+ case CHELSIO_T5:
+ exp_major = T5FW_MIN_VERSION_MAJOR;
+ exp_minor = T5FW_MIN_VERSION_MINOR;
+ exp_micro = T5FW_MIN_VERSION_MICRO;
+ break;
+ case CHELSIO_T6:
+ exp_major = T6FW_MIN_VERSION_MAJOR;
+ exp_minor = T6FW_MIN_VERSION_MINOR;
+ exp_micro = T6FW_MIN_VERSION_MICRO;
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
+ adap->chip);
+ return -EINVAL;
+ }
+
+ if (major < exp_major || (major == exp_major && minor < exp_minor) ||
+ (major == exp_major && minor == exp_minor && micro < exp_micro)) {
+ dev_err(adap->pdev_dev,
+ "Card has firmware version %u.%u.%u, minimum supported firmware is %u.%u.%u.\n",
+ major, minor, micro, exp_major, exp_minor, exp_micro);
+ return -EFAULT;
+ }
+ return 0;
+}
+
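
/* Editorial sketch (not part of this patch): the version gate above is a
 * plain lexicographic comparison of the (major, minor, micro) triple.
 * The helper below is hypothetical and only restates that ordering.
 */
static inline int fw_ver_older(int major, int minor, int micro,
			       int exp_major, int exp_minor, int exp_micro)
{
	return major < exp_major ||
	       (major == exp_major && minor < exp_minor) ||
	       (major == exp_major && minor == exp_minor &&
		micro < exp_micro);
}
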
/* Is the given firmware API compatible with the one the driver was compiled
* with?
*/
@@ -3281,6 +4182,8 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
if (v & ECC_CE_INT_CAUSE_F) {
u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
+ t4_edc_err_read(adapter, idx);
+
t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
if (printk_ratelimit())
dev_warn(adapter->pdev_dev,
@@ -3488,7 +4391,9 @@ int t4_slow_intr_handler(struct adapter *adapter)
void t4_intr_enable(struct adapter *adapter)
{
u32 val = 0;
- u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
@@ -3513,7 +4418,9 @@ void t4_intr_enable(struct adapter *adapter)
*/
void t4_intr_disable(struct adapter *adapter)
{
- u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
@@ -3687,6 +4594,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
return 0;
}
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+ return (adap->flags & FW_OK) && !adap->use_bd;
+}
+
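
/* Editorial sketch (not part of this patch): every TP register accessor
 * changed below follows the same dispatch, shown here once. Prefer the
 * firmware LDST path when t4_use_ldst() allows it, else fall back to
 * backdoor access through TP_PIO_ADDR/TP_PIO_DATA. tp_pio_read() is a
 * hypothetical wrapper for illustration only.
 */
static void tp_pio_read(struct adapter *adap, u32 *vals,
			unsigned int nregs, unsigned int start_index)
{
	if (t4_use_ldst(adap))
		t4_fw_tp_pio_rw(adap, vals, nregs, start_index, 1);
	else
		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
				 vals, nregs, start_index);
}
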
/**
* t4_fw_tp_pio_rw - Access TP PIO through LDST
* @adap: the adapter
@@ -3730,7 +4642,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
- if (adap->flags & FW_OK)
+ if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
else
t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +4672,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
- if (adap->flags & FW_OK)
+ if (t4_use_ldst(adap))
t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
else
t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +4701,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, valp, 1,
TP_RSS_PF0_CONFIG_A + index, 1);
else
@@ -3829,7 +4741,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- if (adapter->flags & FW_OK) {
+ if (t4_use_ldst(adapter)) {
t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
} else {
@@ -3850,7 +4762,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +4780,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
- if (adapter->flags & FW_OK)
+ if (t4_use_ldst(adapter))
t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
else
t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +4836,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
*/
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
- /* T6 and later has 2 channels */
- if (adap->params.arch.nchan == NCHAN) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_cong_drops, 8,
- TP_MIB_TNL_CNG_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_tx_drops, 4,
- TP_MIB_TNL_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_vlan_drops, 4,
- TP_MIB_OFD_VLN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp6_in_errs, 4,
- TP_MIB_TCP_V6IN_ERR_0_A);
- } else {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_cong_drops, 2,
- TP_MIB_TNL_CNG_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_chan_drops, 2,
- TP_MIB_OFD_CHN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_vlan_drops, 2,
- TP_MIB_OFD_VLN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
- }
+ int nchan = adap->params.arch.nchan;
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
&st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
}
@@ -3974,16 +4868,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
*/
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
- /* T6 and later has 2 channels */
- if (adap->params.arch.nchan == NCHAN) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
- 8, TP_MIB_CPL_IN_REQ_0_A);
- } else {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
- 2, TP_MIB_CPL_IN_REQ_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
- 2, TP_MIB_CPL_OUT_RSP_0_A);
- }
+ int nchan = adap->params.arch.nchan;
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ nchan, TP_MIB_CPL_IN_REQ_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+ nchan, TP_MIB_CPL_OUT_RSP_0_A);
}
/**
@@ -4238,6 +5129,119 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
}
/**
+ * t4_set_trace_filter - configure one of the tracing filters
+ * @adap: the adapter
+ * @tp: the desired trace filter parameters
+ * @idx: which filter to configure
+ * @enable: whether to enable or disable the filter
+ *
+ * Configures one of the tracing filters available in HW. If @enable is
+ * %0 @tp is not examined and may be %NULL. The user is responsible to
+ * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+ int idx, int enable)
+{
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg, cfg;
+ u32 multitrc = TRCMULTIFILTER_F;
+
+ if (!enable) {
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+ return 0;
+ }
+
+ cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
+ if (cfg & TRCMULTIFILTER_F) {
+ /* If multiple tracers are enabled, then maximum
+ * capture size is 2.5KB (FIFO size of a single channel)
+ * minus 2 flits for CPL_TRACE_PKT header.
+ */
+ if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
+ return -EINVAL;
+ } else {
+ /* If multiple tracers are disabled, a maximum packet capture
+ * size of 9600 bytes is recommended to avoid deadlocks. Also,
+ * in this mode only trace0 can be enabled and running.
+ */
+ multitrc = 0;
+ if (tp->snap_len > 9600 || idx)
+ return -EINVAL;
+ }
+
+ if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
+ tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
+ tp->min_len > TFMINPKTSIZE_M)
+ return -EINVAL;
+
+ /* stop the tracer we'll be changing */
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+
+ idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
+ data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
+ mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ t4_write_reg(adap, data_reg, tp->data[i]);
+ t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+ }
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
+ TFCAPTUREMAX_V(tp->snap_len) |
+ TFMINPKTSIZE_V(tp->min_len));
+ t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
+ TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
+ (is_t4(adap->params.chip) ?
+ TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
+ T5_TFPORT_V(tp->port) | T5_TFEN_F |
+ T5_TFINVERTMATCH_V(tp->invert)));
+
+ return 0;
+}
+
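
/* Editorial usage sketch (not part of this patch), assuming the
 * trace_params layout used above: enable trace filter 0 to capture the
 * first 128 bytes of every packet on port 0. An all-zero data/mask pair
 * matches everything; error handling is left to the caller.
 */
static int example_enable_trace0(struct adapter *adap)
{
	struct trace_params tp = { 0 };

	tp.port = 0;		/* trace port 0 */
	tp.snap_len = 128;	/* capture at most 128 bytes per packet */
	tp.min_len = 0;		/* no minimum packet size */
	tp.skip_ofst = 0;	/* match from the first byte */
	tp.skip_len = 0;
	tp.invert = 0;		/* capture packets that match */

	return t4_set_trace_filter(adap, &tp, 0, 1);
}
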
+/**
+ * t4_get_trace_filter - query one of the tracing filters
+ * @adap: the adapter
+ * @tp: the current trace filter parameters
+ * @idx: which trace filter to query
+ * @enabled: non-zero if the filter is enabled
+ *
+ * Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+ int *enabled)
+{
+ u32 ctla, ctlb;
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg;
+
+ ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
+ ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
+
+ if (is_t4(adap->params.chip)) {
+ *enabled = !!(ctla & TFEN_F);
+ tp->port = TFPORT_G(ctla);
+ tp->invert = !!(ctla & TFINVERTMATCH_F);
+ } else {
+ *enabled = !!(ctla & T5_TFEN_F);
+ tp->port = T5_TFPORT_G(ctla);
+ tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
+ }
+ tp->snap_len = TFCAPTUREMAX_G(ctlb);
+ tp->min_len = TFMINPKTSIZE_G(ctlb);
+ tp->skip_ofst = TFOFFSET_G(ctla);
+ tp->skip_len = TFLENGTH_G(ctla);
+
+ ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
+ data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
+ mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+ tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+ }
+}
+
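
/* Editorial read-back sketch (not part of this patch): query filter 0
 * with the getter above and log its state.
 */
static void example_dump_trace0(struct adapter *adap)
{
	struct trace_params tp;
	int enabled;

	t4_get_trace_filter(adap, &tp, 0, &enabled);
	dev_info(adap->pdev_dev, "trace0 %s, snap_len %u, min_len %u\n",
		 enabled ? "enabled" : "disabled", tp.snap_len, tp.min_len);
}
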
+/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
* @cnt: where to store the count statistics
@@ -6294,7 +7298,7 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- if (adap->flags & FW_OK) {
+ if (t4_use_ldst(adap)) {
t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
TP_VLAN_PRI_MAP_A, 1);
t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c8488f430d19..13708fde1668 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
- L2T_SIZE = 4096, /* # of L2T entries */
PM_NSTATS = 5, /* # of PM stats */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
@@ -264,4 +263,9 @@ enum {
#undef FLASH_START
#undef FLASH_MAX_SIZE
+#define SGE_TIMESTAMP_S 0
+#define SGE_TIMESTAMP_M 0xfffffffffffffffULL
+#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
+#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+
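
/* Editorial sketch (not part of this patch): like the other _S/_M/_V/_G
 * macro families in these headers, _V() shifts a value into the field
 * and _G() masks it back out, so any value that fits in the 60-bit mask
 * round-trips unchanged.
 */
static inline __u64 sge_timestamp_roundtrip(void)
{
	__u64 raw = SGE_TIMESTAMP_V(0x123456789abcdefULL);

	return SGE_TIMESTAMP_G(raw);	/* == 0x123456789abcdefULL */
}
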
#endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 132cb8fc0bf7..b99144afd4ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -660,6 +660,9 @@ struct cpl_tx_pkt {
#define TXPKT_OVLAN_IDX_S 12
#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+#define TXPKT_T5_OVLAN_IDX_S 12
+#define TXPKT_T5_OVLAN_IDX_V(x) ((x) << TXPKT_T5_OVLAN_IDX_S)
+
#define TXPKT_INTF_S 16
#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index d7ca106927b0..03ed00c49823 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -155,6 +157,27 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
+
+ /* T6 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x6001),
+ CH_PCI_ID_TABLE_FENTRY(0x6002),
+ CH_PCI_ID_TABLE_FENTRY(0x6003),
+ CH_PCI_ID_TABLE_FENTRY(0x6004),
+ CH_PCI_ID_TABLE_FENTRY(0x6005),
+ CH_PCI_ID_TABLE_FENTRY(0x6006),
+ CH_PCI_ID_TABLE_FENTRY(0x6007),
+ CH_PCI_ID_TABLE_FENTRY(0x6009),
+ CH_PCI_ID_TABLE_FENTRY(0x600d),
+ CH_PCI_ID_TABLE_FENTRY(0x6010),
+ CH_PCI_ID_TABLE_FENTRY(0x6011),
+ CH_PCI_ID_TABLE_FENTRY(0x6014),
+ CH_PCI_ID_TABLE_FENTRY(0x6015),
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 375a825573b0..fc3044c8ac1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -136,6 +136,20 @@
#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
& INGPACKBOUNDARY_M)
+#define VFIFO_ENABLE_S 10
+#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S)
+#define VFIFO_ENABLE_F VFIFO_ENABLE_V(1U)
+
+#define SGE_DBVFIFO_BADDR_A 0x1138
+
+#define DBVFIFO_SIZE_S 6
+#define DBVFIFO_SIZE_M 0xfffU
+#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)
+
+#define T6_DBVFIFO_SIZE_S 0
+#define T6_DBVFIFO_SIZE_M 0x1fffU
+#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+
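
/* Editorial sketch (not part of this patch): the doorbell VFIFO size
 * field moved and widened on T6, so a reader of SGE_DBVFIFO_SIZE_A must
 * pick the decoder by chip revision. The helper name is hypothetical,
 * and T4 is assumed not to reach it.
 */
static unsigned int dbvfifo_size(struct adapter *adap)
{
	u32 val = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);

	return is_t5(adap->params.chip) ? DBVFIFO_SIZE_G(val)
					: T6_DBVFIFO_SIZE_G(val);
}
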
#define GLOBALENABLE_S 0
#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
#define GLOBALENABLE_F GLOBALENABLE_V(1U)
@@ -303,6 +317,8 @@
#define SGE_FL_BUFFER_SIZE7_A 0x1060
#define SGE_FL_BUFFER_SIZE8_A 0x1064
+#define SGE_IMSG_CTXT_BADDR_A 0x1088
+#define SGE_FLM_CACHE_BADDR_A 0x108c
#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
#define THRESHOLD_0_S 24
@@ -338,6 +354,11 @@
#define EGRTHRESHOLDPACKING_G(x) \
(((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
+#define T6_EGRTHRESHOLDPACKING_S 16
+#define T6_EGRTHRESHOLDPACKING_M 0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+ (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
#define SGE_TIMESTAMP_LO_A 0x1098
#define SGE_TIMESTAMP_HI_A 0x109c
@@ -352,6 +373,7 @@
#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
#define SGE_DBFIFO_STATUS_A 0x10a4
+#define SGE_DBVFIFO_SIZE_A 0x113c
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
@@ -864,6 +886,10 @@
/* registers for module MA */
#define MA_EDRAM0_BAR_A 0x77c0
+#define EDRAM0_BASE_S 16
+#define EDRAM0_BASE_M 0xfffU
+#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)
+
#define EDRAM0_SIZE_S 0
#define EDRAM0_SIZE_M 0xfffU
#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
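
/* Editorial sketch (not part of this patch), assuming both BAR fields
 * are in MB units as elsewhere in the driver: decode the eDRAM0 range
 * from MA_EDRAM0_BAR_A. The helper name is hypothetical.
 */
static void edram0_range(struct adapter *adap, u32 *base, u32 *limit)
{
	u32 bar = t4_read_reg(adap, MA_EDRAM0_BAR_A);

	*base = EDRAM0_BASE_G(bar) << 20;	/* MB -> bytes */
	*limit = *base + (EDRAM0_SIZE_G(bar) << 20) - 1;
}
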
@@ -871,6 +897,10 @@
#define MA_EDRAM1_BAR_A 0x77c4
+#define EDRAM1_BASE_S 16
+#define EDRAM1_BASE_M 0xfffU
+#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M)
+
#define EDRAM1_SIZE_S 0
#define EDRAM1_SIZE_M 0xfffU
#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
@@ -878,6 +908,11 @@
#define MA_EXT_MEMORY_BAR_A 0x77c8
+#define EXT_MEM_BASE_S 16
+#define EXT_MEM_BASE_M 0xfffU
+#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S)
+#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M)
+
#define EXT_MEM_SIZE_S 0
#define EXT_MEM_SIZE_M 0xfffU
#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
@@ -885,6 +920,10 @@
#define MA_EXT_MEMORY1_BAR_A 0x7808
+#define EXT_MEM1_BASE_S 16
+#define EXT_MEM1_BASE_M 0xfffU
+#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
+
#define EXT_MEM1_SIZE_S 0
#define EXT_MEM1_SIZE_M 0xfffU
#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
@@ -892,6 +931,10 @@
#define MA_EXT_MEMORY0_BAR_A 0x77c8
+#define EXT_MEM0_BASE_S 16
+#define EXT_MEM0_BASE_M 0xfffU
+#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M)
+
#define EXT_MEM0_SIZE_S 0
#define EXT_MEM0_SIZE_M 0xfffU
#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
@@ -973,6 +1016,10 @@
/* registers for module CIM */
#define CIM_BOOT_CFG_A 0x7b00
+#define CIM_SDRAM_BASE_ADDR_A 0x7b14
+#define CIM_SDRAM_ADDR_SIZE_A 0x7b18
+#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c
+#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20
#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
#define BOOTADDR_M 0xffffff00U
@@ -1231,6 +1278,33 @@
#define TP_OUT_CONFIG_A 0x7d04
#define TP_GLOBAL_CONFIG_A 0x7d08
+#define TP_CMM_TCB_BASE_A 0x7d10
+#define TP_CMM_MM_BASE_A 0x7d14
+#define TP_CMM_TIMER_BASE_A 0x7d18
+#define TP_PMM_TX_BASE_A 0x7d20
+#define TP_PMM_RX_BASE_A 0x7d28
+#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c
+#define TP_PMM_RX_MAX_PAGE_A 0x7d30
+#define TP_PMM_TX_PAGE_SIZE_A 0x7d34
+#define TP_PMM_TX_MAX_PAGE_A 0x7d38
+#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c
+
+#define PMRXNUMCHN_S 31
+#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S)
+#define PMRXNUMCHN_F PMRXNUMCHN_V(1U)
+
+#define PMTXNUMCHN_S 30
+#define PMTXNUMCHN_M 0x3U
+#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M)
+
+#define PMTXMAXPAGE_S 0
+#define PMTXMAXPAGE_M 0x1fffffU
+#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M)
+
+#define PMRXMAXPAGE_S 0
+#define PMRXMAXPAGE_M 0x1fffffU
+#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M)
+
#define DBGLAMODE_S 14
#define DBGLAMODE_M 0x3U
#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
@@ -1338,6 +1412,9 @@
#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
#define TP_RSS_LKP_TABLE_A 0x7dec
+#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60
+#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64
+#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68
#define LKPTBLROWVLD_S 31
#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
@@ -1483,6 +1560,11 @@
#define TP_MIB_RQE_DFR_PKT_A 0x64
#define ULP_TX_INT_CAUSE_A 0x8dcc
+#define ULP_TX_TPT_LLIMIT_A 0x8dd4
+#define ULP_TX_TPT_ULIMIT_A 0x8dd8
+#define ULP_TX_PBL_LLIMIT_A 0x8ddc
+#define ULP_TX_PBL_ULIMIT_A 0x8de0
+#define ULP_TX_ERR_TABLE_BASE_A 0x8e04
#define PBL_BOUND_ERR_CH3_S 31
#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
@@ -1804,6 +1886,9 @@
#define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U)
#define MPS_TRC_RSS_CONTROL_A 0x9808
+#define MPS_TRC_FILTER1_RSS_CONTROL_A 0x9ff4
+#define MPS_TRC_FILTER2_RSS_CONTROL_A 0x9ffc
+#define MPS_TRC_FILTER3_RSS_CONTROL_A 0xa004
#define MPS_T5_TRC_RSS_CONTROL_A 0xa00c
#define RSSCONTROL_S 16
@@ -1812,6 +1897,59 @@
#define QUEUENUMBER_S 0
#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
+#define TFINVERTMATCH_S 24
+#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S)
+#define TFINVERTMATCH_F TFINVERTMATCH_V(1U)
+
+#define TFEN_S 22
+#define TFEN_V(x) ((x) << TFEN_S)
+#define TFEN_F TFEN_V(1U)
+
+#define TFPORT_S 18
+#define TFPORT_M 0xfU
+#define TFPORT_V(x) ((x) << TFPORT_S)
+#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M)
+
+#define TFLENGTH_S 8
+#define TFLENGTH_M 0x1fU
+#define TFLENGTH_V(x) ((x) << TFLENGTH_S)
+#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M)
+
+#define TFOFFSET_S 0
+#define TFOFFSET_M 0x1fU
+#define TFOFFSET_V(x) ((x) << TFOFFSET_S)
+#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M)
+
+#define T5_TFINVERTMATCH_S 25
+#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S)
+#define T5_TFINVERTMATCH_F T5_TFINVERTMATCH_V(1U)
+
+#define T5_TFEN_S 23
+#define T5_TFEN_V(x) ((x) << T5_TFEN_S)
+#define T5_TFEN_F T5_TFEN_V(1U)
+
+#define T5_TFPORT_S 18
+#define T5_TFPORT_M 0x1fU
+#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S)
+#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810
+#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820
+
+#define TFMINPKTSIZE_S 16
+#define TFMINPKTSIZE_M 0x1ffU
+#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S)
+#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M)
+
+#define TFCAPTUREMAX_S 0
+#define TFCAPTUREMAX_M 0x3fffU
+#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S)
+#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M)
+
+#define MPS_TRC_FILTER0_MATCH_A 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80
+#define MPS_TRC_FILTER1_MATCH_A 0x9d00
+
#define TP_RSS_CONFIG_A 0x7df0
#define TNL4TUPENIPV6_S 31
@@ -2247,12 +2385,32 @@
#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
#define MATCHSRAM_F MATCHSRAM_V(1U)
+#define MPS_RX_PG_RSV0_A 0x11010
+#define MPS_RX_PG_RSV4_A 0x11020
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
+#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
+#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
+#define USED_S 16
+#define USED_M 0x7ffU
+#define USED_G(x) (((x) >> USED_S) & USED_M)
+
+#define ALLOC_S 0
+#define ALLOC_M 0x7ffU
+#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M)
+
+#define T5_USED_S 16
+#define T5_USED_M 0xfffU
+#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M)
+
+#define T5_ALLOC_S 0
+#define T5_ALLOC_M 0xfffU
+#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M)
+
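
/* Editorial sketch (not part of this patch), assuming the T5_ variants
 * apply to T5 and later: the buffer-group page counters widened from 11
 * to 12 bits, so decoding a raw MPS_RX_PG_RSV0_A-style value is chip
 * dependent. The helper name is hypothetical.
 */
static void decode_pg_usage(struct adapter *adap, u32 val,
			    unsigned int *used, unsigned int *alloc)
{
	if (is_t4(adap->params.chip)) {
		*used = USED_G(val);
		*alloc = ALLOC_G(val);
	} else {
		*used = T5_USED_G(val);
		*alloc = T5_ALLOC_G(val);
	}
}
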
#define DMACH_S 0
#define DMACH_M 0xffffU
#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
@@ -2410,8 +2568,21 @@
#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U)
#define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_LLIMIT_A 0x1915c
+#define ULP_RX_ISCSI_ULIMIT_A 0x19160
#define ULP_RX_ISCSI_TAGMASK_A 0x19164
#define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_TDDP_LLIMIT_A 0x1916c
+#define ULP_RX_TDDP_ULIMIT_A 0x19170
+#define ULP_RX_STAG_LLIMIT_A 0x1917c
+#define ULP_RX_STAG_ULIMIT_A 0x19180
+#define ULP_RX_RQ_LLIMIT_A 0x19184
+#define ULP_RX_RQ_ULIMIT_A 0x19188
+#define ULP_RX_PBL_LLIMIT_A 0x1918c
+#define ULP_RX_PBL_ULIMIT_A 0x19190
+#define ULP_RX_CTX_BASE_A 0x19194
+#define ULP_RX_RQUDP_LLIMIT_A 0x191a4
+#define ULP_RX_RQUDP_ULIMIT_A 0x191a8
#define ULP_RX_LA_CTL_A 0x1923c
#define ULP_RX_LA_RDPTR_A 0x19240
#define ULP_RX_LA_RDDATA_A 0x19244
@@ -2473,6 +2644,10 @@
#define SOURCEPF_M 0x7U
#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
+#define T6_SOURCEPF_S 9
+#define T6_SOURCEPF_M 0x7U
+#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M)
+
#define PL_INT_CAUSE_A 0x1940c
#define ULP_TX_S 27
@@ -2612,7 +2787,20 @@
#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
#define T6_LIPMISS_F T6_LIPMISS_V(1U)
+#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_TID_BASE_A 0x19c30
+#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
+
+#define HASHEN_S 20
+#define HASHEN_V(x) ((x) << HASHEN_S)
+#define HASHEN_F HASHEN_V(1U)
#define REQQPARERR_S 16
#define REQQPARERR_V(x) ((x) << REQQPARERR_S)
@@ -2634,6 +2822,10 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
+#define BASEADDR_S 3
+#define BASEADDR_M 0x1fffffffU
+#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M)
+
#define TCAMINTPERR_S 13
#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
#define TCAMINTPERR_F TCAMINTPERR_V(1U)
@@ -2740,10 +2932,11 @@
#define EDC_H_BIST_DATA_PATTERN_A 0x50010
#define EDC_H_BIST_STATUS_RDATA_A 0x50028
+#define EDC_H_ECC_ERR_ADDR_A 0x50084
#define EDC_T51_BASE_ADDR 0x50800
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
#define PL_VF_REV_A 0x4
#define PL_VF_WHOAMI_A 0x0
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ab4674684acc..a32de30ea663 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -762,8 +762,6 @@ enum fw_ldst_func_mod_index {
struct fw_ldst_cmd {
__be32 op_to_addrspace;
-#define FW_LDST_CMD_ADDRSPACE_S 0
-#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
__be32 cycles_to_len16;
union fw_ldst {
struct fw_ldst_addrval {
@@ -788,6 +786,13 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
+ struct fw_ldst_cim_rq {
+ u8 req_first64[8];
+ u8 req_second64[8];
+ u8 resp_first64[8];
+ u8 resp_second64[8];
+ __be32 r3[2];
+ } cim_rq;
union fw_ldst_mps {
struct fw_ldst_mps_rplc {
__be16 fid_idx;
@@ -828,9 +833,33 @@ struct fw_ldst_cmd {
__be16 nset_pkd;
__be32 data[12];
} pcie;
+ struct fw_ldst_i2c_deprecated {
+ u8 pid_pkd;
+ u8 base;
+ u8 boffset;
+ u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ u8 pid;
+ u8 did;
+ u8 boffset;
+ u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ u8 val[33];
+ u8 r11[7];
+ } le;
} u;
};
+#define FW_LDST_CMD_ADDRSPACE_S 0
+#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S)
+
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 32b213559b02..c4b262ca7d43 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,18 +36,29 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0D
-#define T4FW_VERSION_MICRO 0x20
+#define T4FW_VERSION_MINOR 0x0E
+#define T4FW_VERSION_MICRO 0x04
#define T4FW_VERSION_BUILD 0x00
+#define T4FW_MIN_VERSION_MAJOR 0x01
+#define T4FW_MIN_VERSION_MINOR 0x04
+#define T4FW_MIN_VERSION_MICRO 0x00
+
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0D
-#define T5FW_VERSION_MICRO 0x20
+#define T5FW_VERSION_MINOR 0x0E
+#define T5FW_VERSION_MICRO 0x04
#define T5FW_VERSION_BUILD 0x00
+#define T5FW_MIN_VERSION_MAJOR 0x00
+#define T5FW_MIN_VERSION_MINOR 0x00
+#define T5FW_MIN_VERSION_MICRO 0x00
+
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0D
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MINOR 0x0E
+#define T6FW_VERSION_MICRO 0x04
#define T6FW_VERSION_BUILD 0x00
+#define T6FW_MIN_VERSION_MAJOR 0x00
+#define T6FW_MIN_VERSION_MINOR 0x00
+#define T6FW_MIN_VERSION_MICRO 0x00
#endif
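The new T{4,5,6}FW_MIN_VERSION triplets record the oldest firmware each chip is supported with (0.0.0 for T5/T6 means no floor is enforced yet). A hypothetical helper, only to show how such a triplet compares lexicographically; the driver's real check lives in the cxgb4 core, not in this header:

	static bool fw_meets_t4_min(u8 maj, u8 min, u8 mic)
	{
		if (maj != T4FW_MIN_VERSION_MAJOR)
			return maj > T4FW_MIN_VERSION_MAJOR;
		if (min != T4FW_MIN_VERSION_MINOR)
			return min > T4FW_MIN_VERSION_MINOR;
		return mic >= T4FW_MIN_VERSION_MICRO;
	}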
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b2b5e5bbe04c..0cfa5d72cafd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -56,7 +56,7 @@
* Generic information about the driver.
*/
#define DRV_VERSION "2.0.0-ko"
-#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
/*
* Module Parameters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index ad53e5ad2acd..fa3786a9d30e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
rspq->unhandled_irqs++;
val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
- if (is_t4(rspq->adapter->params.chip)) {
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!rspq->bar2_addr)) {
t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V((u32)rspq->cntxt_id));
@@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter)
}
val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
- if (is_t4(adapter->params.chip))
+ /* If we don't have access to the new User GTS (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!intrq->bar2_addr)) {
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
val | INGRESSQID_V(intrq->cntxt_id));
- else {
+ } else {
writel(val | INGRESSQID_V(intrq->bar2_qid),
intrq->bar2_addr + SGE_UDB_GTS);
wmb();
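Both hunks above converge on the same doorbell-selection shape: testing the BAR2 pointer rather than is_t4() covers old chips and any case where the user doorbell region could not be mapped. Condensed (names as in the patch; a sketch, not new code):

	if (!q->bar2_addr) {
		/* no user GTS: T4-style indirect register write */
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     val | INGRESSQID_V((u32)q->cntxt_id));
	} else {
		/* T5+: ring the BAR2 user doorbell directly */
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();	/* order the doorbell write before later work */
	}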
@@ -2662,8 +2668,22 @@ int t4vf_sge_init(struct adapter *adapter)
* give it more Free List entries. (Note that the SGE's Egress
* Congestion Threshold is in units of 2 Free List pointers.)
*/
- s->fl_starve_thres
- = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+ switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+ case CHELSIO_T4:
+ s->fl_starve_thres =
+ EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+ break;
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
/*
* Set up tasklet timers.
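Only the field extraction differs per chip generation; the final scaling is shared because the SGE threshold is counted in units of 2 free-list pointers. A worked example of the common tail, with illustrative numbers:

	/* If the extracted threshold field is 16, the starvation level
	 * becomes 16 * 2 + 1 = 33 free-list entries -- one past the
	 * 32-pointer cutoff, so the FL is refilled before the SGE
	 * declares it congested.
	 */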
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 0db6dc9e9ed2..63dd5fdac5b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -619,7 +619,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
*/
whoami = t4_read_reg(adapter,
T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
- pf = SOURCEPF_G(whoami);
+ pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
s_hps = (HOSTPAGESIZEPF0_S +
(HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);

diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 84b6a2b46aec..1671fa3332c2 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.83"
+#define DRV_VERSION "2.3.0.12"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -50,6 +50,7 @@ struct enic_msix_entry {
char devname[IFNAMSIZ];
irqreturn_t (*isr)(int, void *);
void *devid;
+ cpumask_var_t affinity_mask;
};
/* Store only the lower range. Higher range is given by fw. */
@@ -143,6 +144,7 @@ struct enic {
struct vnic_dev *vdev;
struct timer_list notify_timer;
struct work_struct reset;
+ struct work_struct tx_hang_reset;
struct work_struct change_mtu_work;
struct msix_entry msix_entry[ENIC_INTR_MAX];
struct enic_msix_entry msix[ENIC_INTR_MAX];
@@ -191,6 +193,25 @@ struct enic {
struct vnic_gen_stats gen_stats;
};
+static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
+{
+ struct enic *enic = vdev->priv;
+
+ return enic->netdev;
+}
+
+/* Wrapper macros for kernel logging.
+ * Make sure a variable named vdev, of type struct vnic_dev *, is in scope
+ * wherever these macros are used.
+ */
+#define vdev_info(args...) dev_info(&vdev->pdev->dev, args)
+#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args)
+#define vdev_err(args...) dev_err(&vdev->pdev->dev, args)
+
+#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args)
+#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args)
+#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args)
+
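Because these wrappers expand to a bare vdev, a call site that only holds a queue pointer has to pull the device into scope first, exactly what the vnic_rq/vnic_wq hunks later in this patch do. A minimal sketch:

	int example_disable(struct vnic_rq *rq)
	{
		struct vnic_dev *vdev = rq->vdev; /* make 'vdev' visible */

		vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
		return -ETIMEDOUT;
	}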
static inline struct device *enic_get_dev(struct enic *enic)
{
return &(enic->pdev->dev);
@@ -243,6 +264,32 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
return enic->rq_count + enic->wq_count + 1;
}
+static inline bool enic_is_err_intr(struct enic *enic, int intr)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ return intr == enic_legacy_err_intr();
+ case VNIC_DEV_INTR_MODE_MSIX:
+ return intr == enic_msix_err_intr(enic);
+ case VNIC_DEV_INTR_MODE_MSI:
+ default:
+ return false;
+ }
+}
+
+static inline bool enic_is_notify_intr(struct enic *enic, int intr)
+{
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ return intr == enic_legacy_notify_intr();
+ case VNIC_DEV_INTR_MODE_MSIX:
+ return intr == enic_msix_notify_intr(enic);
+ case VNIC_DEV_INTR_MODE_MSI:
+ default:
+ return false;
+ }
+}
+
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index d106186f4f4a..3c677ed3c29e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -177,7 +177,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int res, i;
enic = netdev_priv(dev);
- res = skb_flow_dissect_flow_keys(skb, &keys);
+ res = skb_flow_dissect_flow_keys(skb, &keys, 0);
if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
(keys.basic.ip_proto != IPPROTO_TCP &&
keys.basic.ip_proto != IPPROTO_UDP))
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f3f1601a76f3..f44a39c40642 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
- ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
if (rxcoal->use_adaptive_rx_coalesce)
ecmd->use_adaptive_rx_coalesce = 1;
@@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev,
return 0;
}
+static int enic_coalesce_valid(struct enic *enic,
+ struct ethtool_coalesce *ec)
+{
+ u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
+ u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
+ ec->rx_coalesce_usecs_high);
+ u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
+ ec->rx_coalesce_usecs_low);
+
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -EINVAL;
+
+ if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
+ ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
+ (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
+ netdev_info(enic->netdev, "ethtool_set_coalesce: adapter supports max coalesce value of %d. Setting max value.\n",
+ coalesce_usecs_max);
+
+ if (ec->rx_coalesce_usecs_high &&
+ (rx_coalesce_usecs_high <
+ rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+ return -EINVAL;
+
+ return 0;
+}
+
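In user-visible terms, an ethtool -C request that names any knob the driver does not implement (tx-frames, rx-frames, stats-block-usecs, adaptive-tx, the pkt-rate band parameters, and so on) now fails with -EINVAL instead of being silently ignored, tx-usecs is accepted only in MSI-X mode, and out-of-range usecs values are clamped to the adapter maximum with a log notice rather than rejected. For example, `ethtool -C eth0 adaptive-rx on rx-usecs-low 10 rx-usecs-high 80` still configures the adaptive range (subject to the range check above), while `ethtool -C eth0 tx-frames 4` is now refused.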
static int enic_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd)
{
@@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev,
u32 rx_coalesce_usecs_high;
u32 coalesce_usecs_max;
unsigned int i, intr;
+ int ret;
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
+ ret = enic_coalesce_valid(enic, ecmd);
+ if (ret)
+ return ret;
coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
coalesce_usecs_max);
@@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev,
rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
coalesce_usecs_max);
- switch (vnic_dev_get_intr_mode(enic->vdev)) {
- case VNIC_DEV_INTR_MODE_INTX:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
- if (ecmd->use_adaptive_rx_coalesce ||
- ecmd->rx_coalesce_usecs_low ||
- ecmd->rx_coalesce_usecs_high)
- return -EINVAL;
-
- intr = enic_legacy_io_intr();
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSI:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
- if (ecmd->use_adaptive_rx_coalesce ||
- ecmd->rx_coalesce_usecs_low ||
- ecmd->rx_coalesce_usecs_high)
- return -EINVAL;
-
- vnic_intr_coalescing_timer_set(&enic->intr[0],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSIX:
- if (ecmd->rx_coalesce_usecs_high &&
- (rx_coalesce_usecs_high <
- rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
- return -EINVAL;
-
+ if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- }
-
- rxcoal->use_adaptive_rx_coalesce =
- !!ecmd->use_adaptive_rx_coalesce;
- if (!rxcoal->use_adaptive_rx_coalesce)
- enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-
- if (ecmd->rx_coalesce_usecs_high) {
- rxcoal->range_end = rx_coalesce_usecs_high;
- rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
- rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
- ENIC_AIC_LARGE_PKT_DIFF;
+ tx_coalesce_usecs);
}
- break;
- default:
- break;
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ }
+ rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
+ if (!rxcoal->use_adaptive_rx_coalesce)
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+ if (ecmd->rx_coalesce_usecs_high) {
+ rxcoal->range_end = rx_coalesce_usecs_high;
+ rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+ rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+ ENIC_AIC_LARGE_PKT_DIFF;
}
- enic->tx_coalesce_usecs = tx_coalesce_usecs;
enic->rx_coalesce_usecs = rx_coalesce_usecs;
return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 918a8e42139b..b36643ef0593 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -39,6 +39,7 @@
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
+#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
@@ -112,6 +113,71 @@ static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{3, 6}, /* 10 - 40 Gbps */
};
+static void enic_init_affinity_hint(struct enic *enic)
+{
+ int numa_node = dev_to_node(&enic->pdev->dev);
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
+ (enic->msix[i].affinity_mask &&
+ !cpumask_empty(enic->msix[i].affinity_mask)))
+ continue;
+ if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
+ GFP_KERNEL))
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ enic->msix[i].affinity_mask);
+ }
+}
+
+static void enic_free_affinity_hint(struct enic *enic)
+{
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
+ continue;
+ free_cpumask_var(enic->msix[i].affinity_mask);
+ }
+}
+
+static void enic_set_affinity_hint(struct enic *enic)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < enic->intr_count; i++) {
+ if (enic_is_err_intr(enic, i) ||
+ enic_is_notify_intr(enic, i) ||
+ !enic->msix[i].affinity_mask ||
+ cpumask_empty(enic->msix[i].affinity_mask))
+ continue;
+ err = irq_set_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
+ if (err)
+ netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ err);
+ }
+
+ for (i = 0; i < enic->wq_count; i++) {
+ int wq_intr = enic_msix_wq_intr(enic, i);
+
+ if (enic->msix[wq_intr].affinity_mask &&
+ !cpumask_empty(enic->msix[wq_intr].affinity_mask))
+ netif_set_xps_queue(enic->netdev,
+ enic->msix[wq_intr].affinity_mask,
+ i);
+ }
+}
+
+static void enic_unset_affinity_hint(struct enic *enic)
+{
+ int i;
+
+ for (i = 0; i < enic->intr_count; i++)
+ irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+}
+
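The intent of the helpers above: spread each I/O vector across the CPUs of the NIC's own NUMA node, leave the error/notify vectors for the IRQ core to place, and mirror the WQ masks into XPS so transmit work stays local. Illustrative only:

	/* On a two-node box where node 1 owns CPUs 8-15, a VIC on node 1
	 * with 8 I/O vectors gets
	 *
	 *	cpumask_local_spread(0, 1) == 8, ...,
	 *	cpumask_local_spread(7, 1) == 15
	 *
	 * so every RQ/WQ interrupt, and via netif_set_xps_queue() each
	 * TX queue, lands on a CPU local to the adapter.
	 */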
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -178,13 +244,15 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
-static void enic_log_q_error(struct enic *enic)
+static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
u32 error_status;
+ bool err = false;
for (i = 0; i < enic->wq_count; i++) {
error_status = vnic_wq_error_status(&enic->wq[i]);
+ err |= error_status;
if (error_status)
netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
i, error_status);
@@ -192,10 +260,13 @@ static void enic_log_q_error(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
error_status = vnic_rq_error_status(&enic->rq[i]);
+ err |= error_status;
if (error_status)
netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
i, error_status);
}
+
+ return err;
}
static void enic_msglvl_check(struct enic *enic)
@@ -333,10 +404,9 @@ static irqreturn_t enic_isr_msix_err(int irq, void *data)
vnic_intr_return_all_credits(&enic->intr[intr]);
- enic_log_q_error(enic);
-
- /* schedule recovery from WQ/RQ error */
- schedule_work(&enic->reset);
+ if (enic_log_q_error(enic))
+ /* schedule recovery from WQ/RQ error */
+ schedule_work(&enic->reset);
return IRQ_HANDLED;
}
@@ -804,7 +874,7 @@ static void enic_set_rx_mode(struct net_device *netdev)
static void enic_tx_timeout(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- schedule_work(&enic->reset);
+ schedule_work(&enic->tx_hang_reset);
}
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1149,6 +1219,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ u32 timer = cq->tobe_rx_coal_timeval;
+
+ if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+ }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+ int index;
+ u32 timer;
+ u32 range_start;
+ u32 traffic;
+ u64 delta;
+ ktime_t now = ktime_get();
+
+ delta = ktime_us_delta(now, cq->prev_ts);
+ if (delta < ENIC_AIC_TS_BREAK)
+ return;
+ cq->prev_ts = now;
+
+ traffic = pkt_size_counter->large_pkt_bytes_cnt +
+ pkt_size_counter->small_pkt_bytes_cnt;
+ /* The table takes Mbps
+ * traffic *= 8 => bits
+ * traffic *= (10^6 / delta) => bps
+ * traffic /= 10^6 => Mbps
+ *
+ * Combining, traffic *= (8 / delta)
+ */
+
+ traffic <<= 3;
+ traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+ for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+ if (traffic < mod_table[index].rx_rate)
+ break;
+ range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+ pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+ rx_coal->small_pkt_range_start :
+ rx_coal->large_pkt_range_start;
+ timer = range_start + ((rx_coal->range_end - range_start) *
+ mod_table[index].range_percent / 100);
+ /* Damping */
+ cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+ pkt_size_counter->large_pkt_bytes_cnt = 0;
+ pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
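A worked instance of the conversion comment above: suppose the two byte counters sum to 250,000 bytes and delta is 1,000 us:

	traffic = 250000 << 3 = 2,000,000		/* bits */
	traffic / (u32)delta = 2,000,000 / 1,000 = 2,000	/* bits/us == Mbps */

2,000 Mbps then indexes mod_table[] to pick a range percentage, and the resulting timer is averaged with the previous value ((timer + old) >> 1) so the coalescing setting moves smoothly rather than jumping.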
static int enic_poll(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1199,6 +1327,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
if (err)
rq_work_done = rq_work_to_do;
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ /* Call the function which refreshes the intr coalescing timer
+ * value based on the traffic.
+ */
+ enic_calc_int_moderation(enic, &enic->rq[0]);
if (rq_work_done < rq_work_to_do) {
@@ -1207,70 +1340,14 @@ static int enic_poll(struct napi_struct *napi, int budget)
*/
napi_complete(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
}
return rq_work_done;
}
-static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
- unsigned int intr = enic_msix_rq_intr(enic, rq->index);
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- u32 timer = cq->tobe_rx_coal_timeval;
-
- if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
- vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
- cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
- }
-}
-
-static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
- int index;
- u32 timer;
- u32 range_start;
- u32 traffic;
- u64 delta;
- ktime_t now = ktime_get();
-
- delta = ktime_us_delta(now, cq->prev_ts);
- if (delta < ENIC_AIC_TS_BREAK)
- return;
- cq->prev_ts = now;
-
- traffic = pkt_size_counter->large_pkt_bytes_cnt +
- pkt_size_counter->small_pkt_bytes_cnt;
- /* The table takes Mbps
- * traffic *= 8 => bits
- * traffic *= (10^6 / delta) => bps
- * traffic /= 10^6 => Mbps
- *
- * Combining, traffic *= (8 / delta)
- */
-
- traffic <<= 3;
- traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
-
- for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
- if (traffic < mod_table[index].rx_rate)
- break;
- range_start = (pkt_size_counter->small_pkt_bytes_cnt >
- pkt_size_counter->large_pkt_bytes_cnt << 1) ?
- rx_coal->small_pkt_range_start :
- rx_coal->large_pkt_range_start;
- timer = range_start + ((rx_coal->range_end - range_start) *
- mod_table[index].range_percent / 100);
- /* Damping */
- cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
-
- pkt_size_counter->large_pkt_bytes_cnt = 0;
- pkt_size_counter->small_pkt_bytes_cnt = 0;
-}
-
#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
@@ -1407,10 +1484,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
if (err)
work_done = work_to_do;
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- /* Call the function which refreshes
- * the intr coalescing timer value based on
- * the traffic. This is supported only in
- * the case of MSI-x mode
+ /* Call the function which refreshes the intr coalescing timer
+ * value based on the traffic.
*/
enic_calc_int_moderation(enic, &enic->rq[rq]);
@@ -1569,12 +1644,6 @@ static void enic_set_rx_coal_setting(struct enic *enic)
int index = -1;
struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
- /* If intr mode is not MSIX, do not do adaptive coalescing */
- if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
- netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
- return;
- }
-
/* 1. Read the link speed from fw
* 2. Pick the default range for the speed
* 3. Update it in enic->rx_coalesce_setting
@@ -1646,6 +1715,8 @@ static int enic_open(struct net_device *netdev)
netdev_err(netdev, "Unable to request irq.\n");
return err;
}
+ enic_init_affinity_hint(enic);
+ enic_set_affinity_hint(enic);
err = enic_dev_notify_set(enic);
if (err) {
@@ -1698,6 +1769,7 @@ err_out_free_rq:
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
enic_dev_notify_unset(enic);
err_out_free_intr:
+ enic_unset_affinity_hint(enic);
enic_free_intr(enic);
return err;
@@ -1751,6 +1823,7 @@ static int enic_stop(struct net_device *netdev)
}
enic_dev_notify_unset(enic);
+ enic_unset_affinity_hint(enic);
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
@@ -1925,6 +1998,19 @@ static int enic_dev_open(struct enic *enic)
return err;
}
+static int enic_dev_soft_reset(struct enic *enic)
+{
+ int err;
+
+ err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
+ vnic_dev_soft_reset_done, 0);
+ if (err)
+ netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
+ err);
+
+ return err;
+}
+
static int enic_dev_hang_reset(struct enic *enic)
{
int err;
@@ -2061,6 +2147,26 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
spin_lock(&enic->enic_api_lock);
+ enic_stop(enic->netdev);
+ enic_dev_soft_reset(enic);
+ enic_reset_addr_lists(enic);
+ enic_init_vnic_resources(enic);
+ enic_set_rss_nic_cfg(enic);
+ enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_open(enic->netdev);
+ spin_unlock(&enic->enic_api_lock);
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+
+ rtnl_unlock();
+}
+
+static void enic_tx_hang_reset(struct work_struct *work)
+{
+ struct enic *enic = container_of(work, struct enic, tx_hang_reset);
+
+ rtnl_lock();
+
+ spin_lock(&enic->enic_api_lock);
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2273,6 +2379,7 @@ static void enic_dev_deinit(struct enic *enic)
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
+ enic_free_affinity_hint(enic);
}
static void enic_kdump_kernel_config(struct enic *enic)
@@ -2368,6 +2475,7 @@ static int enic_dev_init(struct enic *enic)
return 0;
err_out_free_vnic_resources:
+ enic_free_affinity_hint(enic);
enic_clear_intr_mode(enic);
enic_free_vnic_resources(enic);
@@ -2485,6 +2593,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
+ err = vnic_devcmd_init(enic->vdev);
+ if (err)
+ goto err_out_vnic_unregister;
+
#ifdef CONFIG_PCI_IOV
/* Get number of subvnics */
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -2579,6 +2692,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
+ INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
@@ -2659,8 +2773,8 @@ err_out_disable_sriov_pp:
pci_disable_sriov(pdev);
enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
}
-err_out_vnic_unregister:
#endif
+err_out_vnic_unregister:
vnic_dev_unregister(enic->vdev);
err_out_iounmap:
enic_iounmap(enic);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 0daa1c7073cb..abeda2a9ea27 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -24,6 +24,7 @@
#include "vnic_dev.h"
#include "vnic_cq.h"
+#include "enic.h"
void vnic_cq_free(struct vnic_cq *cq)
{
@@ -42,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
- pr_err("Failed to hook CQ[%d] resource\n", index);
+ vdev_err("Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 62f7b7baf93c..1ffd1050860b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -27,46 +27,9 @@
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
+#include "vnic_wq.h"
#include "vnic_stats.h"
-
-enum vnic_proxy_type {
- PROXY_NONE,
- PROXY_BY_BDF,
- PROXY_BY_INDEX,
-};
-
-struct vnic_res {
- void __iomem *vaddr;
- dma_addr_t bus_addr;
- unsigned int count;
-};
-
-struct vnic_intr_coal_timer_info {
- u32 mul;
- u32 div;
- u32 max_usec;
-};
-
-struct vnic_dev {
- void *priv;
- struct pci_dev *pdev;
- struct vnic_res res[RES_TYPE_MAX];
- enum vnic_dev_intr_mode intr_mode;
- struct vnic_devcmd __iomem *devcmd;
- struct vnic_devcmd_notify *notify;
- struct vnic_devcmd_notify notify_copy;
- dma_addr_t notify_pa;
- u32 notify_sz;
- dma_addr_t linkstatus_pa;
- struct vnic_stats *stats;
- dma_addr_t stats_pa;
- struct vnic_devcmd_fw_info *fw_info;
- dma_addr_t fw_info_pa;
- enum vnic_proxy_type proxy;
- u32 proxy_index;
- u64 args[VNIC_DEVCMD_NARGS];
- struct vnic_intr_coal_timer_info intr_coal_timer_info;
-};
+#include "enic.h"
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
@@ -90,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
return -EINVAL;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
- pr_err("vNIC BAR0 res hdr length error\n");
+ vdev_err("vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
mrh = bar->vaddr;
if (!rh) {
- pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+ vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
@@ -106,11 +69,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
(ioread32(&rh->version) != VNIC_RES_VERSION)) {
if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
- pr_err("vNIC BAR0 res magic/version error "
- "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
- VNIC_RES_MAGIC, VNIC_RES_VERSION,
- MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
- ioread32(&rh->magic), ioread32(&rh->version));
+ vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+ VNIC_RES_MAGIC, VNIC_RES_VERSION,
+ MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+ ioread32(&rh->magic), ioread32(&rh->version));
return -EINVAL;
}
}
@@ -144,17 +106,15 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar[bar_num].len) {
- pr_err("vNIC BAR0 resource %d "
- "out-of-bounds, offset 0x%x + "
- "size 0x%x > bar len 0x%lx\n",
- type, bar_offset,
- len,
- bar[bar_num].len);
+ vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+ type, bar_offset, len,
+ bar[bar_num].len);
return -EINVAL;
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD:
+ case RES_TYPE_DEVCMD2:
len = count;
break;
default:
@@ -238,8 +198,8 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
&ring->base_addr_unaligned);
if (!ring->descs_unaligned) {
- pr_err("Failed to allocate ring (size=%d), aborting\n",
- (int)ring->size);
+ vdev_err("Failed to allocate ring (size=%d), aborting\n",
+ (int)ring->size);
return -ENOMEM;
}
@@ -281,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -ENODEV;
}
if (status & STAT_BUSY) {
- pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
@@ -315,8 +275,8 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -err;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- pr_err("Error %d devcmd %d\n",
- err, _CMD_N(cmd));
+ vdev_neterr("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
return -err;
}
@@ -330,10 +290,162 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
}
- pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr("Timed out devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+ struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+ unsigned int i;
+ int delay, err;
+ u32 fetch_index, new_posted;
+ u32 posted = dc2c->posted;
+
+ fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+ if (fetch_index == 0xFFFFFFFF)
+ return -ENODEV;
+
+ new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+ if (new_posted == fetch_index) {
+ vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+ _CMD_N(cmd), fetch_index, posted);
+ return -EBUSY;
+ }
+ dc2c->cmd_ring[posted].cmd = cmd;
+ dc2c->cmd_ring[posted].flags = 0;
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+ /* A write memory barrier prevents compiler and/or CPU reordering here,
+ * so the descriptor cannot be posted before it is fully initialized;
+ * otherwise the hardware could read stale descriptor fields.
+ */
+ wmb();
+ iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+ dc2c->posted = new_posted;
+
+ if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ return 0;
+
+ for (delay = 0; delay < wait; delay++) {
+ if (result->color == dc2c->color) {
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+ if (result->error) {
+ err = result->error;
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ vdev_neterr("Error %d devcmd %d\n",
+ err, _CMD_N(cmd));
+ return -err;
+ }
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+ for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+ vdev->args[i] = result->results[i];
+
+ return 0;
+ }
+ udelay(100);
+ }
+
+ vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+
+ return -ETIMEDOUT;
+}
+
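The posted/fetch_index bookkeeping above is a classic one-slot-open producer ring: advancing posted into the consumer's fetch_index would make "full" and "empty" indistinguishable, so one descriptor is always sacrificed. The invariant, condensed (names as in the patch):

	u32 next = (posted + 1) % DEVCMD2_RING_SIZE;

	if (next == fetch_index)
		return -EBUSY;	/* full: posted == fetch_index is
				 * reserved to mean "empty" */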
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ return -ENODEV;
+ vdev->devcmd_rtn = _vnic_dev_cmd;
+
+ return 0;
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+ int err;
+ unsigned int fetch_index;
+
+ if (vdev->devcmd2)
+ return 0;
+
+ vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
+ if (!vdev->devcmd2)
+ return -ENOMEM;
+
+ vdev->devcmd2->color = 1;
+ vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+ err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+ DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_devcmd2;
+
+ fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ vdev_err("Fatal error in devcmd2 init - hardware surprise removal\n");
+
+ return -ENODEV;
+ }
+
+ enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+ 0);
+ vdev->devcmd2->posted = fetch_index;
+ vnic_wq_enable(&vdev->devcmd2->wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_wq;
+
+ vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+ vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+ vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+ vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+ VNIC_PADDR_TARGET;
+ vdev->args[1] = DEVCMD2_RING_SIZE;
+
+ err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+ if (err)
+ goto err_free_desc_ring;
+
+ vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+ return 0;
+
+err_free_desc_ring:
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+ kfree(vdev->devcmd2);
+ vdev->devcmd2 = NULL;
+
+ return err;
+}
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ vnic_wq_free(&vdev->devcmd2->wq);
+ kfree(vdev->devcmd2);
+}
+
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
@@ -348,7 +460,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
vdev->args[2] = *a0;
vdev->args[3] = *a1;
- err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+ err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
if (err)
return err;
@@ -357,7 +469,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
err = (int)vdev->args[1];
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+ vdev_neterr("Error %d proxy devcmd %d\n", err,
+ _CMD_N(cmd));
return err;
}
@@ -375,7 +488,7 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
vdev->args[0] = *a0;
vdev->args[1] = *a1;
- err = _vnic_dev_cmd(vdev, cmd, wait);
+ err = vdev->devcmd_rtn(vdev, cmd, wait);
*a0 = vdev->args[0];
*a1 = vdev->args[1];
@@ -546,14 +659,14 @@ int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
return 0;
}
-static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
-static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -650,7 +763,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
- pr_err("Can't set packet filter\n");
+ vdev_neterr("Can't set packet filter\n");
return err;
}
@@ -667,7 +780,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- pr_err("Can't add addr [%pM], %d\n", addr, err);
+ vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
return err;
}
@@ -684,7 +797,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- pr_err("Can't del addr [%pM], %d\n", addr, err);
+ vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
return err;
}
@@ -728,7 +841,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
dma_addr_t notify_pa;
if (vdev->notify || vdev->notify_pa) {
- pr_err("notify block %p still allocated", vdev->notify);
+ vdev_neterr("notify block %p still allocated", vdev->notify);
return -EINVAL;
}
@@ -838,7 +951,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
memset(vdev->args, 0, sizeof(vdev->args));
if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
- err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+ err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
else
err = ERR_ECMDUNKNOWN;
@@ -847,7 +960,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
*/
if ((err == ERR_ECMDUNKNOWN) ||
(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
- pr_warn("Using default conversion factor for interrupt coalesce timer\n");
+ vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
vnic_dev_intr_coal_timer_info_default(vdev);
return 0;
}
@@ -938,6 +1051,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
pci_free_consistent(vdev->pdev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
+ if (vdev->devcmd2)
+ vnic_dev_deinit_devcmd2(vdev);
+
kfree(vdev);
}
}
@@ -959,10 +1075,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
if (vnic_dev_discover_res(vdev, bar, num_bars))
goto err_out;
- vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
- if (!vdev->devcmd)
- goto err_out;
-
return vdev;
err_out:
@@ -977,6 +1089,29 @@ struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
+int vnic_devcmd_init(struct vnic_dev *vdev)
+{
+ void __iomem *res;
+ int err;
+
+ res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (res) {
+ err = vnic_dev_init_devcmd2(vdev);
+ if (err)
+ vdev_warn("DEVCMD2 init failed: %d, using DEVCMD1\n",
+ err);
+ else
+ return 0;
+ } else {
+ vdev_warn("DEVCMD2 resource not found (old firmware?), using DEVCMD1\n");
+ }
+ err = vnic_dev_init_devcmd1(vdev);
+ if (err)
+ vdev_err("DEVCMD1 initialization failed: %d\n", err);
+
+ return err;
+}
+
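Probe calls this once, immediately after resource discovery: firmware exposing a RES_TYPE_DEVCMD2 region gets the ring-based interface, older firmware falls back to the single-register DEVCMD1 path, and only the failure of both aborts the probe. Every devcmd issuer then goes through the vdev->devcmd_rtn indirection, so callers never see which transport is underneath.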
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
u64 a0, a1 = len;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 1fb214efceba..54156c484424 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -70,7 +70,48 @@ struct vnic_dev_ring {
unsigned int desc_avail;
};
-struct vnic_dev;
+enum vnic_proxy_type {
+ PROXY_NONE,
+ PROXY_BY_BDF,
+ PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+ void __iomem *vaddr;
+ dma_addr_t bus_addr;
+ unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
+struct vnic_dev {
+ void *priv;
+ struct pci_dev *pdev;
+ struct vnic_res res[RES_TYPE_MAX];
+ enum vnic_dev_intr_mode intr_mode;
+ struct vnic_devcmd __iomem *devcmd;
+ struct vnic_devcmd_notify *notify;
+ struct vnic_devcmd_notify notify_copy;
+ dma_addr_t notify_pa;
+ u32 notify_sz;
+ dma_addr_t linkstatus_pa;
+ struct vnic_stats *stats;
+ dma_addr_t stats_pa;
+ struct vnic_devcmd_fw_info *fw_info;
+ dma_addr_t fw_info_pa;
+ enum vnic_proxy_type proxy;
+ u32 proxy_index;
+ u64 args[VNIC_DEVCMD_NARGS];
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
+ struct devcmd2_controller *devcmd2;
+ int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait);
+};
+
struct vnic_stats;
void *vnic_dev_priv(struct vnic_dev *vdev);
@@ -114,7 +155,9 @@ int vnic_dev_deinit(struct vnic_dev *vdev);
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
@@ -135,5 +178,6 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter *data);
+int vnic_devcmd_init(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 435d0cd96c22..2a812880b884 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -365,6 +365,12 @@ enum vnic_devcmd_cmd {
*/
CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+ /* Initialization for the devcmd2 interface.
+ * in: (u64) a0 = host result buffer physical address
+ * in: (u16) a1 = number of entries in result buffer
+ */
+ CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
/* Add a filter.
* in: (u64) a0= filter address
* (u32) a1= size of filter
@@ -629,4 +635,26 @@ struct vnic_devcmd {
u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
};
+#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+ u16 pad;
+ u16 flags;
+ u32 cmd;
+ u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+ u64 results[VNIC_DEVCMD2_NRESULTS];
+ u32 pad;
+ u16 completed_index;
+ u8 error;
+ u8 color;
+};
+
+#define DEVCMD2_RING_SIZE 32
+#define DEVCMD2_DESC_SIZE 128
+
#endif /* _VNIC_DEVCMD_H_ */
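The color field is what lets the result ring be polled without a per-entry consumed flag: the firmware writes each result with the color of its current pass over the ring, and the driver flips its expected color whenever next_result wraps (as _vnic_dev_cmd2() does above), so stale entries from the previous pass never match. A hypothetical predicate isolating the idea:

	static bool devcmd2_result_ready(struct devcmd2_controller *dc2c)
	{
		/* true only for entries written during the current pass */
		return dc2c->result[dc2c->next_result].color == dc2c->color;
	}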
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 0ca107f7bc8c..942759d9cb3c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -25,6 +25,7 @@
#include "vnic_dev.h"
#include "vnic_intr.h"
+#include "enic.h"
void vnic_intr_free(struct vnic_intr *intr)
{
@@ -39,7 +40,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
- pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+ vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index e0a73f1ca6f4..4e45f88ac1d4 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -48,6 +48,13 @@ enum vnic_res_type {
RES_TYPE_RSVD7,
RES_TYPE_DEVCMD, /* Device command region */
RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
+ RES_TYPE_SUBVNIC, /* subvnic resource type */
+ RES_TYPE_MQ_WQ, /* MQ Work queues */
+ RES_TYPE_MQ_RQ, /* MQ Receive queues */
+ RES_TYPE_MQ_CQ, /* MQ Completion queues */
+ RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
+ RES_TYPE_DEVCMD2, /* Device control region */
RES_TYPE_MAX, /* Count of resource types */
};
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index c4b2183bf352..cce2777dfc41 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -26,6 +26,7 @@
#include "vnic_dev.h"
#include "vnic_rq.h"
+#include "enic.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
@@ -91,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
- pr_err("Failed to hook RQ[%d] resource\n", index);
+ vdev_err("Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
@@ -167,6 +168,7 @@ void vnic_rq_enable(struct vnic_rq *rq)
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
+ struct vnic_dev *vdev = rq->vdev;
iowrite32(0, &rq->ctrl->enable);
@@ -177,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
udelay(10);
}
- pr_err("Failed to disable RQ[%d]\n", rq->index);
+ vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index b5a1c937fad2..05ad16a7e872 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -26,6 +26,7 @@
#include "vnic_dev.h"
#include "vnic_wq.h"
+#include "enic.h"
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
@@ -94,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
- pr_err("Failed to hook WQ[%d] resource\n", index);
+ vdev_err("Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
@@ -113,10 +114,27 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
return 0;
}
-static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
- unsigned int fetch_index, unsigned int posted_index,
- unsigned int error_interrupt_enable,
- unsigned int error_interrupt_offset)
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = 0;
+ wq->vdev = vdev;
+
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (!wq->ctrl)
+ return -EINVAL;
+ vnic_wq_disable(wq);
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+ return err;
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset)
{
u64 paddr;
unsigned int count = wq->ring.desc_count;
@@ -140,7 +158,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- vnic_wq_init_start(wq, cq_index, 0, 0,
+ enic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
}
@@ -158,6 +176,7 @@ void vnic_wq_enable(struct vnic_wq *wq)
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
+ struct vnic_dev *vdev = wq->vdev;
iowrite32(0, &wq->ctrl->enable);
@@ -168,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
udelay(10);
}
- pr_err("Failed to disable WQ[%d]\n", wq->index);
+ vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 296154351823..01209613d57d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -88,6 +88,18 @@ struct vnic_wq {
unsigned int pkts_outstanding;
};
+struct devcmd2_controller {
+ struct vnic_wq_ctrl __iomem *wq_ctrl;
+ struct vnic_devcmd2 *cmd_ring;
+ struct devcmd2_result *result;
+ u16 next_result;
+ u16 result_size;
+ int color;
+ struct vnic_dev_ring results_ring;
+ struct vnic_wq wq;
+ u32 posted;
+};
+
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
/* how many does SW own? */
@@ -174,5 +186,11 @@ void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size);
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+ unsigned int fetch_index, unsigned int posted_index,
+ unsigned int error_interrupt_enable,
+ unsigned int error_interrupt_offset);
#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index c0a7813603c3..cf94b72dbacd 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1226,7 +1226,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
if (int_status & ISR_PRS)
dm9000_rx(dev);
- /* Trnasmit Interrupt check */
+ /* Transmit Interrupt check */
if (int_status & ISR_PTS)
dm9000_tx_done(dev, db);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index a02ecc4f9002..cadcee645f74 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1597,7 +1597,6 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
- info->eedump_len = DE_EEPROM_SIZE;
}
static int de_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index d1017509b08a..f7b42483921c 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -604,19 +604,7 @@ static struct pci_driver pci_driver = {
.probe = ec_bhf_probe,
.remove = ec_bhf_remove,
};
-
-static int __init ec_bhf_init(void)
-{
- return pci_register_driver(&pci_driver);
-}
-
-static void __exit ec_bhf_exit(void)
-{
- pci_unregister_driver(&pci_driver);
-}
-
-module_init(ec_bhf_init);
-module_exit(ec_bhf_exit);
+module_pci_driver(pci_driver);
module_param(polling_frequency, long, S_IRUGO);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
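module_pci_driver() generates exactly the init/exit boilerplate deleted above; in the kernel headers it is defined (via module_driver()) roughly as:

	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			      pci_unregister_driver)

so the conversion is purely mechanical and leaves module parameters such as polling_frequency untouched.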
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d12b41b3b19..d463563e1f70 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.6.0.2"
+#define DRV_VER "10.6.0.3"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -105,6 +105,8 @@
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define CNTL_SERIAL_NUM_WORDS 8 /* Controller serial number words */
+#define CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */
#define RSS_INDIR_TABLE_LEN 128
#define RSS_HASH_KEY_LEN 40
@@ -228,6 +230,7 @@ struct be_mcc_obj {
struct be_tx_stats {
u64 tx_bytes;
u64 tx_pkts;
+ u64 tx_vxlan_offload_pkts;
u64 tx_reqs;
u64 tx_compl;
ulong tx_jiffies;
@@ -275,6 +278,7 @@ struct be_rx_page_info {
struct be_rx_stats {
u64 rx_bytes;
u64 rx_pkts;
+ u64 rx_vxlan_offload_pkts;
u32 rx_drops_no_skbs; /* skb allocation errors */
u32 rx_drops_no_frags; /* HW has no fetched frags */
u32 rx_post_fail; /* page post alloc failures */
@@ -578,6 +582,7 @@ struct be_adapter {
u16 pvid;
__be16 vxlan_port;
int vxlan_port_count;
+ int vxlan_port_aliases;
struct phy_info phy;
u8 wol_cap;
bool wol_en;
@@ -587,9 +592,11 @@ struct be_adapter {
int be_get_temp_freq;
struct be_hwmon hwmon_info;
u8 pf_number;
+ u8 pci_func_num;
struct rss_info rss_info;
/* Filters for packets that need to be sent to BMC */
u32 bmc_filt_mask;
+ u16 serial_num[CNTL_SERIAL_NUM_WORDS];
};
#define be_physfn(adapter) (!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9eac3227d2ca..1795c935ff02 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb)
return wrb->payload.embedded_payload;
}
-static void be_mcc_notify(struct be_adapter *adapter)
+static int be_mcc_notify(struct be_adapter *adapter)
{
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
if (be_check_error(adapter, BE_ERROR_ANY))
- return;
+ return -EIO;
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
wmb();
iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+ return 0;
}
/* To check if valid bit is set, check the entire word as we don't know
@@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
return;
}
+ if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return;
+ }
+
if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
opcode == OPCODE_COMMON_WRITE_OBJECT) &&
subsystem == CMD_SUBSYSTEM_COMMON) {
@@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto out;
status = be_mcc_wait_compl(adapter);
if (status == -EIO)
@@ -841,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
return status;
dest_wrb = be_cmd_copy(adapter, wrb);
- if (!dest_wrb)
- return -EBUSY;
+ if (!dest_wrb) {
+ status = -EBUSY;
+ goto unlock;
+ }
if (use_mcc(adapter))
status = be_mcc_notify_wait(adapter);
@@ -852,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter,
if (!status)
memcpy(wrb, dest_wrb, sizeof(*wrb));
+unlock:
be_cmd_unlock(adapter);
return status;
}
@@ -1547,7 +1560,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
else
hdr->version = 2;
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
adapter->stats_cmd_sent = true;
err:
@@ -1583,7 +1599,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
+
adapter->stats_cmd_sent = true;
err:
@@ -1687,8 +1706,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
sizeof(*req), wrb, NULL);
- be_mcc_notify(adapter);
-
+ status = be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1860,7 +1878,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1969,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
be_if_cap_flags(adapter));
}
flags &= be_if_cap_flags(adapter);
+ if (!flags)
+ return -ENOTSUPP;
return __be_cmd_rx_filter(adapter, flags, value);
}
@@ -2320,7 +2340,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2491,7 +2514,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->params.op_code = cpu_to_le32(flash_opcode);
req->params.data_buf_size = cpu_to_le32(buf_size);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2585,7 +2611,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
- goto err;
+ goto err_unlock;
}
req = embedded_payload(wrb);
@@ -2599,8 +2625,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req->loopback_type = loopback_type;
req->loopback_state = enable;
- status = be_mcc_notify_wait(adapter);
-err:
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err_unlock;
+
+ spin_unlock_bh(&adapter->mcc_lock);
+
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+ status = -ETIMEDOUT;
+
+ return status;
+
+err_unlock:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2636,7 +2673,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req->num_pkts = cpu_to_le32(num_pkts);
req->loopback_type = cpu_to_le32(loopback_type);
- be_mcc_notify(adapter);
+ status = be_mcc_notify(adapter);
+ if (status)
+ goto err;
spin_unlock_bh(&adapter->mcc_lock);
@@ -2818,10 +2857,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
struct be_cmd_req_cntl_attribs *req;
struct be_cmd_resp_cntl_attribs *resp;
- int status;
+ int status, i;
int payload_len = max(sizeof(*req), sizeof(*resp));
struct mgmt_controller_attrib *attribs;
struct be_dma_mem attribs_cmd;
+ u32 *serial_num;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -2852,6 +2892,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
if (!status) {
attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ adapter->pci_func_num = attribs->pci_func_num;
+ serial_num = attribs->hba_attribs.controller_serial_number;
+ for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+ adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+ (BIT_MASK(16) - 1);
}
err:
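
The loop above retains only the low 16 bits of each little-endian 32-bit word returned by firmware; BIT_MASK(16) - 1 is just 0xffff. A self-contained demonstration of the same masking, with made-up sample bytes and the userspace le32toh() standing in for le32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <endian.h>

    #define SERIAL_WORDS 8

    int main(void)
    {
        /* hypothetical little-endian firmware bytes; remaining words zero */
        uint8_t raw[SERIAL_WORDS * 4] = {
            0x34, 0x12, 0xad, 0xde, /* word 0 = 0xdead1234 on the wire */
            0x78, 0x56, 0xef, 0xbe, /* word 1 = 0xbeef5678 on the wire */
        };
        uint16_t serial[SERIAL_WORDS];

        for (int i = 0; i < SERIAL_WORDS; i++) {
            uint32_t w;

            memcpy(&w, &raw[i * 4], sizeof(w));
            serial[i] = le32toh(w) & 0xffff; /* == BIT_MASK(16) - 1 */
        }

        printf("word0=0x%04x word1=0x%04x\n", serial[0], serial[1]);
        return 0;
    }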
@@ -3670,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
status = -EINVAL;
goto err;
}
-
adapter->pf_number = desc->pf_num;
be_copy_nic_desc(res, desc);
}
@@ -3682,7 +3726,10 @@ err:
return status;
}
-/* Will use MBOX only if MCCQ has not been created */
+/* Will use MBOX only if MCCQ has not been created
+ * non-zero domain => a PF is querying this on behalf of a VF
+ * zero domain => a PF or a VF is querying this for itself
+ */
int be_cmd_get_profile_config(struct be_adapter *adapter,
struct be_resources *res, u8 query, u8 domain)
{
@@ -3709,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
OPCODE_COMMON_GET_PROFILE_CONFIG,
cmd.size, &wrb, &cmd);
- req->hdr.domain = domain;
if (!lancer_chip(adapter))
req->hdr.version = 1;
req->type = ACTIVE_PROFILE_TYPE;
+ /* When a function is querying profile information relating to
+ * itself, hdr.pf_num must be set to its pci_func_num + 1
+ */
+ req->hdr.domain = domain;
+ if (domain == 0)
+ req->hdr.pf_num = adapter->pci_func_num + 1;
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
* descriptors with all bits set to "1" for the fields which can be
@@ -3882,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter *adapter,
vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
BE_IF_FLAGS_DEFQ_RSS);
}
-
- nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
} else {
num_vf_qs = 1;
}
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+
+ nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
nic_vft->rq_count = cpu_to_le16(num_vf_qs);
nic_vft->txq_count = cpu_to_le16(num_vf_qs);
nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..91155ea74f34 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -289,7 +289,9 @@ struct be_cmd_req_hdr {
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
u8 version; /* dword 3 */
- u8 rsvd[3]; /* dword 3 */
+ u8 rsvd1; /* dword 3 */
+ u8 pf_num; /* dword 3 */
+ u8 rsvd2; /* dword 3 */
};
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
@@ -620,6 +622,11 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
@@ -1495,6 +1502,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
#define BE_PME_D3COLD_CAP 0x80
/********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT 12000
+
struct be_cmd_req_loopback_test {
struct be_cmd_req_hdr hdr;
u32 loopback_type;
@@ -1635,15 +1644,21 @@ struct be_cmd_req_set_qos {
struct mgmt_hba_attribs {
u32 rsvd0[24];
u8 controller_model_number[32];
- u32 rsvd1[79];
- u8 rsvd2[3];
+ u32 rsvd1[16];
+ u32 controller_serial_number[8];
+ u32 rsvd2[55];
+ u8 rsvd3[3];
u8 phy_port;
- u32 rsvd3[13];
+ u32 rsvd4[13];
} __packed;
struct mgmt_controller_attrib {
struct mgmt_hba_attribs hba_attribs;
- u32 rsvd0[10];
+ u32 rsvd0[2];
+ u16 rsvd1;
+ u8 pci_func_num;
+ u8 rsvd2;
+ u32 rsvd3[7];
} __packed;
struct be_cmd_req_cntl_attribs {
@@ -1758,6 +1773,7 @@ struct be_cmd_req_set_mac_list {
/*********************** HSW Config ***********************/
#define PORT_FWD_TYPE_VEPA 0x3
#define PORT_FWD_TYPE_VEB 0x2
+#define PORT_FWD_TYPE_PASSTHRU 0x1
#define ENABLE_MAC_SPOOFCHK 0x2
#define DISABLE_MAC_SPOOFCHK 0x3
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b2476dbfd103..f4cb8e425853 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -138,6 +138,7 @@ static const struct be_ethtool_stat et_stats[] = {
static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+ {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
{DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
{DRVSTAT_TX_INFO(tx_internal_parity_err)},
{DRVSTAT_TX_INFO(tx_bytes)},
{DRVSTAT_TX_INFO(tx_pkts)},
+ {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
/* Number of skbs queued for transmission by the driver */
{DRVSTAT_TX_INFO(tx_reqs)},
/* Number of times the TX queue was stopped due to lack
@@ -232,9 +234,6 @@ static void be_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
@@ -847,10 +846,21 @@ err:
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
+ int ret;
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ loopback_type, 1);
+ if (ret)
+ return ret;
+
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
loopback_type, 1500, 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
+
+ ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+ BE_NO_LOOPBACK, 1);
+ if (ret)
+ return ret;
+
return *status;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..eb48a977f8da 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
return 0;
+ /* if device is not running, copy MAC to netdev->dev_addr */
+ if (!netif_running(netdev))
+ goto done;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -677,11 +681,14 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
struct be_tx_stats *stats = tx_stats(txo);
+ u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
u64_stats_update_begin(&stats->sync);
stats->tx_reqs++;
stats->tx_bytes += skb->len;
- stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+ stats->tx_pkts += tx_pkts;
+ if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+ stats->tx_vxlan_offload_pkts += tx_pkts;
u64_stats_update_end(&stats->sync);
}
@@ -1116,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
struct sk_buff *skb,
struct be_wrb_params *wrb_params)
{
- /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
- * less may cause a transmit stall on that port. So the work-around is
- * to pad short packets (<= 32 bytes) to a 36-byte length.
+ /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
+ * packets that are 32 bytes or less may cause a transmit stall
+ * on that port. The workaround is to pad such packets
+ * (len <= 32 bytes) to a minimum length of 36 bytes.
*/
- if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+ if (skb->len <= 32) {
if (skb_put_padto(skb, 36))
return NULL;
}
@@ -1254,7 +1262,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
if (is_udp_pkt((*skb))) {
struct udphdr *udp = udp_hdr((*skb));
- switch (udp->dest) {
+ switch (ntohs(udp->dest)) {
case DHCP_CLIENT_PORT:
os2bmc = is_dhcp_client_filt_enabled(adapter);
goto done;
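
The fix above matters because udp->dest is stored in network (big-endian) byte order, while DHCP_CLIENT_PORT and friends are host-order constants; without ntohs() the case labels never match on little-endian machines. A tiny demonstration (68 is the standard DHCP client port):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define DHCP_CLIENT_PORT 68

    int main(void)
    {
        uint16_t wire_port = htons(DHCP_CLIENT_PORT); /* as in the UDP header */

        /* on a little-endian host the raw compare fails */
        printf("raw compare:   %s\n",
               wire_port == DHCP_CLIENT_PORT ? "match" : "no match");
        printf("ntohs compare: %s\n",
               ntohs(wire_port) == DHCP_CLIENT_PORT ? "match" : "no match");
        return 0;
    }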
@@ -1957,6 +1965,8 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_compl++;
stats->rx_bytes += rxcp->pkt_size;
stats->rx_pkts++;
+ if (rxcp->tunneled)
+ stats->rx_vxlan_offload_pkts++;
if (rxcp->pkt_type == BE_MULTICAST_PACKET)
stats->rx_mcast_pkts++;
if (rxcp->err)
@@ -2447,10 +2457,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
{
- struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = 0;
+ rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2511,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
/* After cleanup, leave the CQ in unarmed state */
be_cq_notify(adapter, rx_cq->id, false, 0);
-
- /* Then free posted rx buffers that were not used */
- while (atomic_read(&rxq->used) > 0) {
- page_info = get_rx_page_info(rxo);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(atomic_read(&rxq->used));
- rxq->tail = 0;
- rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2590,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
+ free_cpumask_var(eqo->affinity_mask);
}
- free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q);
}
}
@@ -2594,13 +2608,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
- if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
- eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
- napi_hash_add(&eqo->napi);
+
aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->idx = i;
@@ -2616,6 +2624,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
+
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
}
return 0;
}
@@ -3354,13 +3370,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q;
if (q->created) {
+ /* If RXQs are destroyed while in an "out of buffer"
+ * state, there is a possibility of an HW stall on
+ * Lancer. So, post 64 buffers to each queue to relieve
+ * the "out of buffer" condition.
+ * Make sure there's space in the RXQ before posting.
+ */
+ if (lancer_chip(adapter)) {
+ be_rx_cq_clean(rxo);
+ if (atomic_read(&q->used) == 0)
+ be_post_rx_frags(rxo, GFP_KERNEL,
+ MAX_RX_POST);
+ }
+
be_cmd_rxq_destroy(adapter, q);
be_rx_cq_clean(rxo);
+ be_rxq_clean(rxo);
}
be_queue_free(adapter, q);
}
}
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[0], 0);
+
+ be_clear_uc_list(adapter);
+
+ /* The IFACE flags are enabled in the open path and cleared
+ * in the close path. When a VF gets detached from the host and
+ * assigned to a VM the following happens:
+ * - VF's IFACE flags get cleared in the detach path
+ * - IFACE create is issued by the VF in the attach path
+ * Due to a bug in the BE3/Skyhawk-R FW
+ * (Lancer FW doesn't have the bug), the IFACE capability flags
+ * specified along with the IFACE create cmd issued by a VF are not
+ * honoured by FW. As a consequence, if a *new* driver
+ * (that enables/disables IFACE flags in open/close)
+ * is loaded in the host and an *old* driver is used by a VM/VF,
+ * the IFACE gets created *without* the needed flags.
+ * To avoid this, disable RX-filter flags only for Lancer.
+ */
+ if (lancer_chip(adapter)) {
+ be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+ adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+ }
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3430,8 @@ static int be_close(struct net_device *netdev)
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ be_disable_if_filters(adapter);
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3451,6 @@ static int be_close(struct net_device *netdev)
be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
- be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
@@ -3477,6 +3535,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
return 0;
}
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ if (status)
+ return status;
+
+ /* For BE3 VFs, the PF programs the initial MAC address */
+ if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status)
+ return status;
+ }
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
+
+ be_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3573,10 @@ static int be_open(struct net_device *netdev)
if (status)
goto err;
+ status = be_enable_if_filters(adapter);
+ if (status)
+ goto err;
+
status = be_irq_register(adapter);
if (status)
goto err;
@@ -3529,15 +3616,15 @@ err:
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
+ struct device *dev = &adapter->pdev->dev;
struct be_dma_mem cmd;
- int status = 0;
u8 mac[ETH_ALEN];
+ int status;
eth_zero_addr(mac);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -3546,24 +3633,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
PCICFG_PM_CONTROL_OFFSET,
PCICFG_PM_CONTROL_MASK);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Could not enable Wake-on-lan\n");
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
- cmd.dma);
- return status;
+ dev_err(dev, "Could not enable Wake-on-lan\n");
+ goto err;
}
- status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr,
- &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
- pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
}
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-static void be_mac_clear(struct be_adapter *adapter)
-{
- if (adapter->pmac_id) {
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- }
-}
-
#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
#endif
- /* delete the primary mac along with the uc-mac list */
- be_mac_clear(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
- u32 cap_flags, u32 vf)
-{
- u32 en_flags;
-
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
- en_flags &= cap_flags;
-
- return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
static int be_vfs_if_create(struct be_adapter *adapter)
{
struct be_resources res = {0};
+ u32 cap_flags, en_flags, vf;
struct be_vf_cfg *vf_cfg;
- u32 cap_flags, vf;
int status;
/* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
}
}
- status = be_if_create(adapter, &vf_cfg->if_handle,
- cap_flags, vf + 1);
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
return status;
}
@@ -4145,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter)
int status, level;
u16 profile_id;
- status = be_cmd_get_cntl_attributes(adapter);
- if (status)
- return status;
-
status = be_cmd_query_fw_cfg(adapter);
if (status)
return status;
@@ -4194,15 +4251,8 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- } else {
- /* Maybe the HW was reset; dev_addr must be re-programmed */
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
}
- /* For BE3-R VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter)))
- be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
return 0;
}
@@ -4342,6 +4392,7 @@ static int be_func_init(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 en_flags;
int status;
status = be_func_init(adapter);
@@ -4353,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter)
if (!lancer_chip(adapter))
be_cmd_req_native_mode(adapter);
+ /* Need to invoke this cmd first to get the PCI Function Number */
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
+
if (!BE2_chip(adapter) && be_physfn(adapter))
be_alloc_sriov_res(adapter);
@@ -4364,8 +4420,11 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- status = be_if_create(adapter, &adapter->if_handle,
- be_if_cap_flags(adapter), 0);
+ /* will enable all the needed filter flags in be_open() */
+ en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
if (status)
goto err;
@@ -4391,11 +4450,6 @@ static int be_setup(struct be_adapter *adapter)
dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
}
- if (adapter->vlans_added)
- be_vid_config(adapter);
-
- be_set_rx_mode(adapter->netdev);
-
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
if (status)
@@ -4924,7 +4978,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
{
if (!fhdr) {
dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
- return -1;
+ return false;
}
/* First letter of the build version is used to identify
@@ -4947,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
return false;
}
- return (fhdr->asic_type_rev >= adapter->asic_rev);
+ /* In BE3 FW images the "asic_type_rev" field doesn't track the
+ * asic_rev of the chips it is compatible with.
+ * When asic_type_rev is 0 the image is compatible only with
+ * pre-BE3-R chips (asic_rev < 0x10)
+ */
+ if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
+ return adapter->asic_rev < 0x10;
+ else
+ return (fhdr->asic_type_rev >= adapter->asic_rev);
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
@@ -5079,9 +5141,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int status = 0;
u8 hsw_mode;
- if (!sriov_enabled(adapter))
- return 0;
-
/* BE and Lancer chips support VEB mode only */
if (BEx_chip(adapter) || lancer_chip(adapter)) {
hsw_mode = PORT_FWD_TYPE_VEB;
@@ -5091,6 +5150,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
NULL);
if (status)
return 0;
+
+ if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+ return 0;
}
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@ -5121,9 +5183,14 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
struct device *dev = &adapter->pdev->dev;
int status;
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
+ if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
+ adapter->vxlan_port_aliases++;
+ return;
+ }
+
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
dev_info(dev,
"Only one UDP port supported for VxLAN offloads\n");
@@ -5168,12 +5235,17 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)
goto done;
+ if (adapter->vxlan_port_aliases) {
+ adapter->vxlan_port_aliases--;
+ return;
+ }
+
be_disable_vxlan_offloads(adapter);
dev_info(&adapter->pdev->dev,
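
Together, the add/del hunks above implement a small alias count: re-adding the port that is already offloaded bumps vxlan_port_aliases, and deletes consume aliases before the offload is actually torn down. A stand-alone sketch of that bookkeeping, reduced to a single port and using illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t offload_port; /* currently offloaded port, 0 = none */
    static int port_aliases;      /* extra add requests for the same port */

    static void add_port(uint16_t port)
    {
        if (offload_port && offload_port == port) {
            port_aliases++;       /* same port added again: just count it */
            return;
        }
        if (offload_port) {
            printf("only one port supported, ignoring %u\n", port);
            return;
        }
        offload_port = port;      /* the driver would program HW here */
    }

    static void del_port(uint16_t port)
    {
        if (offload_port != port)
            return;
        if (port_aliases) {
            port_aliases--;       /* an alias still references the port */
            return;
        }
        offload_port = 0;         /* the driver would disable offloads here */
    }

    int main(void)
    {
        add_port(4789);
        add_port(4789); /* alias */
        del_port(4789); /* consumes the alias, offload stays on */
        printf("offloaded port: %u\n", offload_port);
        del_port(4789); /* now actually disabled */
        printf("offloaded port: %u\n", offload_port);
        return 0;
    }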
@@ -5225,6 +5297,27 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
}
#endif
+static int be_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 *id;
+
+ if (MAX_PHYS_ITEM_ID_LEN < id_len)
+ return -ENOSPC;
+
+ ppid->id[0] = adapter->hba_port_num + 1;
+ id = &ppid->id[1];
+ for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+ i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+ memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+ ppid->id_len = id_len;
+
+ return 0;
+}
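
The new ndo_get_phys_port_id() callback emits one port byte followed by the serial-number words copied highest-word-first, giving an ID of CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1 = 17 bytes. The same assembly in a self-contained form (the serial data is made up; MAX_ID_LEN stands in for MAX_PHYS_ITEM_ID_LEN):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define SERIAL_WORDS   8
    #define SERIAL_WORD_SZ sizeof(uint16_t)
    #define MAX_ID_LEN     32

    int main(void)
    {
        uint16_t serial[SERIAL_WORDS] = { 0x1111, 0x2222, 0x3333, 0x4444,
                                          0x5555, 0x6666, 0x7777, 0x8888 };
        uint8_t hba_port_num = 0;
        uint8_t id[MAX_ID_LEN];
        size_t id_len = SERIAL_WORDS * SERIAL_WORD_SZ + 1;

        if (id_len > MAX_ID_LEN)
            return 1; /* mirrors the -ENOSPC check */

        id[0] = hba_port_num + 1;
        uint8_t *p = &id[1];
        for (int i = SERIAL_WORDS - 1; i >= 0; i--, p += SERIAL_WORD_SZ)
            memcpy(p, &serial[i], SERIAL_WORD_SZ); /* highest word first */

        for (size_t i = 0; i < id_len; i++)
            printf("%02x", id[i]);
        printf("\n");
        return 0;
    }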
+
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -5255,6 +5348,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_del_vxlan_port = be_del_vxlan_port,
.ndo_features_check = be_features_check,
#endif
+ .ndo_get_phys_port_id = be_get_phys_port_id,
};
static void be_netdev_init(struct net_device *netdev)
@@ -5813,7 +5907,6 @@ static int be_pci_resume(struct pci_dev *pdev)
if (status)
return status;
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
status = be_resume(adapter);
@@ -5893,7 +5986,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
pci_set_master(pdev);
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 442410cd2ca4..ff665493ca97 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -201,6 +201,7 @@ struct ethoc {
void __iomem *membase;
int dma_alloc;
resource_size_t io_region_size;
+ bool big_endian;
unsigned int num_bd;
unsigned int num_tx;
@@ -236,12 +237,18 @@ struct ethoc_bd {
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
- return ioread32(dev->iobase + offset);
+ if (dev->big_endian)
+ return ioread32be(dev->iobase + offset);
+ else
+ return ioread32(dev->iobase + offset);
}
static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
- iowrite32(data, dev->iobase + offset);
+ if (dev->big_endian)
+ iowrite32be(data, dev->iobase + offset);
+ else
+ iowrite32(data, dev->iobase + offset);
}
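
With the big_endian flag, one driver binary serves register files in either byte order by choosing the accessor at run time. A userspace sketch of the same dispatch over a plain buffer, with glibc's be32toh()/htobe32() modeling ioread32be()/iowrite32be() and the little-endian pair modeling the native accessors (an analogy, not the kernel API):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>
    #include <endian.h>

    struct dev { uint8_t regs[64]; bool big_endian; };

    static uint32_t dev_read(struct dev *d, size_t off)
    {
        uint32_t v;

        memcpy(&v, d->regs + off, sizeof(v));
        return d->big_endian ? be32toh(v) : le32toh(v);
    }

    static void dev_write(struct dev *d, size_t off, uint32_t data)
    {
        uint32_t v = d->big_endian ? htobe32(data) : htole32(data);

        memcpy(d->regs + off, &v, sizeof(v));
    }

    int main(void)
    {
        struct dev d = { .big_endian = true };

        dev_write(&d, 0, 0x11223344);
        printf("first byte in register space: 0x%02x\n", d.regs[0]); /* 0x11 */
        printf("read back: 0x%08x\n", dev_read(&d, 0));
        return 0;
    }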
static inline void ethoc_read_bd(struct ethoc *dev, int index,
@@ -1106,6 +1113,9 @@ static int ethoc_probe(struct platform_device *pdev)
priv->dma_alloc = buffer_size;
}
+ priv->big_endian = pdata ? pdata->big_endian :
+ of_device_is_big_endian(pdev->dev.of_node);
+
/* calculate the number of TX/RX buffers, maximum 128 supported */
num_bd = min_t(unsigned int,
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
@@ -1132,10 +1142,6 @@ static int ethoc_probe(struct platform_device *pdev)
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
- priv->phy_id = -1;
-
-#ifdef CONFIG_OF
- {
const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
@@ -1143,8 +1149,7 @@ static int ethoc_probe(struct platform_device *pdev)
NULL);
if (mac)
memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
- }
-#endif
+ priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 24a85b292007..63c2bcf8031a 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -150,6 +150,9 @@ static void nps_enet_tx_handler(struct net_device *ndev)
if (!priv->tx_packet_sent || tx_ctrl.ct)
return;
+ /* Ack Tx ctrl register */
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
+
/* Check Tx transmit error */
if (unlikely(tx_ctrl.et)) {
ndev->stats.tx_errors++;
@@ -158,11 +161,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
ndev->stats.tx_bytes += tx_ctrl.nt;
}
- if (priv->tx_skb) {
- dev_kfree_skb(priv->tx_skb);
- priv->tx_skb = NULL;
- }
-
+ dev_kfree_skb(priv->tx_skb);
priv->tx_packet_sent = false;
if (netif_queue_stopped(ndev))
@@ -180,15 +179,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_buf_int_enable buf_int_enable;
u32 work_done;
- buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
- buf_int_enable.tx_done = NPS_ENET_ENABLE;
nps_enet_tx_handler(ndev);
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
+ struct nps_enet_buf_int_enable buf_int_enable;
+
napi_complete(napi);
+ buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+ buf_int_enable.tx_done = NPS_ENET_ENABLE;
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
buf_int_enable.value);
}
@@ -211,12 +211,13 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
struct net_device *ndev = dev_instance;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_buf_int_cause buf_int_cause;
+ struct nps_enet_rx_ctl rx_ctrl;
+ struct nps_enet_tx_ctl tx_ctrl;
- buf_int_cause.value =
- nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+ rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+ if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
@@ -307,11 +308,8 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
/* Discard Packets bigger than max frame length */
max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
- if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+ if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
ge_mac_cfg_3->max_len = max_frame_length;
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
- ge_mac_cfg_3->value);
- }
/* Enable interrupts */
buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
@@ -339,11 +337,14 @@ static void nps_enet_hw_enable_control(struct net_device *ndev)
ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+ ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
/* Enable Rx and Tx */
ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+ ge_mac_cfg_3->value);
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
ge_mac_cfg_0.value);
}
@@ -527,10 +528,10 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
/* This driver handles one frame at a time */
netif_stop_queue(ndev);
- nps_enet_send_frame(ndev, skb);
-
priv->tx_skb = skb;
+ nps_enet_send_frame(ndev, skb);
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index fc45c9daa1c2..6703674d679c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -36,7 +36,6 @@
#define NPS_ENET_REG_RX_CTL 0x810
#define NPS_ENET_REG_RX_BUF 0x818
#define NPS_ENET_REG_BUF_INT_ENABLE 0x8C0
-#define NPS_ENET_REG_BUF_INT_CAUSE 0x8C4
#define NPS_ENET_REG_GE_MAC_CFG_0 0x1000
#define NPS_ENET_REG_GE_MAC_CFG_1 0x1004
#define NPS_ENET_REG_GE_MAC_CFG_2 0x1008
@@ -108,25 +107,6 @@ struct nps_enet_buf_int_enable {
};
};
-/* Interrupt cause for data buffer events register */
-struct nps_enet_buf_int_cause {
- union {
- /* tx_done: Interrupt in the case when current frame was
- * read from TX buffer.
- * rx_rdy: Interrupt in the case when new frame is ready
- * in RX buffer.
- */
- struct {
- u32
- __reserved:30,
- tx_done:1,
- rx_rdy:1;
- };
-
- u32 value;
- };
-};
-
/* Gbps Eth MAC Configuration 0 register */
struct nps_enet_ge_mac_cfg_0 {
union {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 32e3807c650e..b2a32209ffbf 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -364,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
-static int
+static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
struct net_device *ndev)
@@ -439,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bdp->cbd_sc = status;
}
- txq->cur_tx = bdp;
-
- return 0;
-
+ return bdp;
dma_mapping_error:
bdp = txq->cur_tx;
for (i = 0; i < frag; i++) {
@@ -450,7 +447,7 @@ dma_mapping_error:
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE);
}
- return NETDEV_TX_OK;
+ return ERR_PTR(-ENOMEM);
}
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
@@ -467,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
unsigned int estatus = 0;
unsigned int index;
int entries_free;
- int ret;
entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -485,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
/* Fill in a Tx ring entry */
bdp = txq->cur_tx;
+ last_bdp = bdp;
status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
@@ -513,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
}
if (nr_frags) {
- ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
- if (ret)
- return ret;
+ last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+ if (IS_ERR(last_bdp))
+ return NETDEV_TX_OK;
} else {
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
if (fep->bufdesc_ex) {
@@ -544,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = estatus;
}
- last_bdp = txq->cur_tx;
index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */
txq->tx_skbuff[index] = skb;
@@ -563,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
skb_tx_timestamp(skb);
+ /* Make sure the updates to bdp and tx_skbuff are performed before
+ * cur_tx.
+ */
+ wmb();
txq->cur_tx = bdp;
/* Trigger transmission start */
@@ -1218,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* get next bdp of dirty_tx */
bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
- while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-
- /* current queue is empty */
- if (bdp == txq->cur_tx)
+ while (bdp != READ_ONCE(txq->cur_tx)) {
+ /* Order the load of cur_tx and cbd_sc */
+ rmb();
+ status = READ_ONCE(bdp->cbd_sc);
+ if (status & BD_ENET_TX_READY)
break;
index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1275,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
+ /* Make sure the updates to bdp and tx_skbuff are performed
+ * before dirty_tx is updated
+ */
+ wmb();
txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
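
The wmb() before publishing cur_tx in the submit path and the rmb() after loading it above form a classic release/acquire pairing: the descriptor and tx_skbuff stores must be visible before the index that advertises them, and the index load must be ordered before the descriptor reads. A condensed C11 expression of the same ordering, with the queue shrunk to a single slot:

    #include <stdio.h>
    #include <stdatomic.h>

    static int payload;              /* stands in for bdp/tx_skbuff */
    static atomic_int published = 0; /* stands in for cur_tx */

    static void producer(void)
    {
        payload = 42;
        /* release pairs with wmb(): payload visible before the index */
        atomic_store_explicit(&published, 1, memory_order_release);
    }

    static void consumer(void)
    {
        /* acquire pairs with rmb(): index load ordered before payload load */
        if (atomic_load_explicit(&published, memory_order_acquire))
            printf("consumed %d\n", payload);
    }

    int main(void)
    {
        /* run sequentially here for brevity; the ordering matters when
         * the producer and consumer run on different CPUs */
        producer();
        consumer();
        return 0;
    }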
@@ -1402,6 +1407,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
if ((status & BD_ENET_RX_LAST) == 0)
netdev_err(ndev, "rcv is not +last\n");
+ writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
/* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1774,11 +1780,11 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int ret = 0;
ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a read op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1810,14 +1816,16 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
+ int ret;
ret = pm_runtime_get_sync(dev);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
+ else
+ ret = 0;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a write op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
@@ -2865,7 +2873,7 @@ fec_enet_open(struct net_device *ndev)
int ret;
ret = pm_runtime_get_sync(&fep->pdev->dev);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
pinctrl_pm_select_default_state(&fep->pdev->dev);
@@ -3023,6 +3031,14 @@ fec_set_mac_address(struct net_device *ndev, void *p)
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
}
+ /* Add a netif status check here to avoid a system hang in this case:
+ * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
+ * after ethx is down, all FEC clocks are gated off and a subsequent
+ * register access would hang the system.
+ */
+ if (!netif_running(ndev))
+ return 0;
+
writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
fep->hwp + FEC_ADDR_LOW);
@@ -3054,7 +3070,6 @@ static void fec_poll_controller(struct net_device *dev)
}
#endif
-#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -3078,7 +3093,7 @@ static int fec_set_features(struct net_device *netdev,
struct fec_enet_private *fep = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
- if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+ if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
napi_disable(&fep->napi);
netif_tx_lock_bh(netdev);
fec_stop(netdev);
@@ -3246,7 +3261,7 @@ static void fec_reset_phy(struct platform_device *pdev)
return;
}
msleep(msec);
- gpio_set_value(phy_reset, 1);
+ gpio_set_value_cansleep(phy_reset, 1);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
@@ -3433,6 +3448,7 @@ fec_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f457a23d0bfb..f9e74461bdc0 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -112,9 +112,8 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
unsigned long flags;
u32 val, tempval;
int inc;
- struct timespec ts;
+ struct timespec64 ts;
u64 ns;
- u32 remainder;
val = 0;
if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
@@ -163,8 +162,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
tempval = readl(fep->hwp + FEC_ATIME);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
- ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
- ts.tv_nsec = remainder;
+ ts = ns_to_timespec64(ns);
/* The tempval is less than 3 seconds, and so val is less than
* 4 seconds. No overflow for 32bit calculation.
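
ns_to_timespec64() replaces the open-coded div_u64_rem(): both split a 64-bit nanosecond count into whole seconds plus the sub-second remainder. The arithmetic on its own (an unsigned-only sketch; the kernel helper also handles negative values):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct ts64 { int64_t tv_sec; long tv_nsec; };

    static struct ts64 ns_to_ts(uint64_t ns)
    {
        struct ts64 ts;

        ts.tv_sec = ns / NSEC_PER_SEC;  /* whole seconds */
        ts.tv_nsec = ns % NSEC_PER_SEC; /* remainder, always < 1e9 */
        return ts;
    }

    int main(void)
    {
        struct ts64 ts = ns_to_ts(3123456789ULL); /* 3.123456789 s */

        printf("%" PRId64 " s, %ld ns\n", ts.tv_sec, ts.tv_nsec);
        return 0;
    }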
@@ -506,12 +504,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
break;
default:
- /*
- * register RXMTRL must be set in order to do V1 packets,
- * therefore it is not possible to time stamp both V1 Sync and
- * Delay_Req messages and hardware does not support
- * timestamping all packets => return error
- */
fep->hwts_rx_en = 1;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag = skb_shinfo(skb)->frags;
while (nr_frags) {
CBDC_SC(bdp,
- BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+ BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 3c40f6b99224..55c36230e176 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -198,11 +198,13 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
/*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MDIO registers (struct gfar)
* This is mildly evil, but so is our hardware for doing this.
* Also, we have to cast back to struct gfar because of
* definition weirdness done in gianfar.h.
*/
-static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
+static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
{
struct gfar __iomem *enet_regs = p;
@@ -210,6 +212,15 @@ static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
}
/*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
+ */
+static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
+{
+ return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
+}
+
+/*
* Return the TBIPAR address for an eTSEC2 node
*/
static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
@@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
/*
- * Return the TBIPAR address for a QE MDIO node
+ * Return the TBIPAR address for a QE MDIO node, starting from the address
+ * of the mapped MII registers (struct fsl_pq_mii)
*/
static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
{
- struct fsl_pq_mdio __iomem *mdio = p;
+ struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
return &mdio->utbipar;
}
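
Both converted helpers rely on container_of() to walk from a pointer to an embedded member (the MII register block) back to the enclosing register map, rather than assuming the member sits at offset zero. The idiom in isolation, with simplified illustrative structs:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mii_regs { unsigned int miimcfg, miimcom; };

    struct mdio_regs {
        unsigned int header;  /* something before mii: offset != 0 */
        struct mii_regs mii;  /* the embedded member we get handed */
        unsigned int utbipar;
    };

    /* given only the embedded MII block, recover the utbipar register */
    static unsigned int *get_tbipa(struct mii_regs *mii)
    {
        struct mdio_regs *mdio = container_of(mii, struct mdio_regs, mii);

        return &mdio->utbipar;
    }

    int main(void)
    {
        struct mdio_regs regs = { .utbipar = 0x1f };

        printf("utbipar = 0x%x\n", *get_tbipa(&regs.mii));
        return 0;
    }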
@@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
.compatible = "fsl,gianfar-tbi",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
- .get_tbipa = get_gfar_tbipa,
+ .get_tbipa = get_gfar_tbipa_from_mii,
},
},
{
.compatible = "fsl,gianfar-mdio",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = 0,
- .get_tbipa = get_gfar_tbipa,
+ .get_tbipa = get_gfar_tbipa_from_mii,
},
},
{
@@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
.compatible = "gianfar",
.data = &(struct fsl_pq_mdio_data) {
.mii_offset = offsetof(struct fsl_pq_mdio, mii),
- .get_tbipa = get_gfar_tbipa,
+ .get_tbipa = get_gfar_tbipa_from_mdio,
},
},
{
@@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
tbipa = data->get_tbipa(priv->map);
+ /*
+ * Consistency check: make sure the TBI address is contained
+ * within the mapped range (not because we would get a
+ * segfault, but to catch bugs in computing the TBI
+ * address). Print an error message but continue anyway.
+ */
+ if ((void *)tbipa > priv->map + resource_size(&res) - 4)
+ dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
+ ((void *)tbipa - priv->map) + 4);
+
iowrite32be(be32_to_cpup(prop), tbipa);
}
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2b7610f341b0..3e6b9b437497 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -107,17 +107,17 @@
#include "gianfar.h"
-#define TX_TIMEOUT (1*HZ)
+#define TX_TIMEOUT (5*HZ)
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
- dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+ int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
bdp->lstatus = cpu_to_be32(lstatus);
}
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
{
struct gfar_private *priv = netdev_priv(ndev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *txbdp;
- struct rxbd8 *rxbdp;
u32 __iomem *rfbptr;
int i, j;
- dma_addr_t bufaddr;
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev)
rfbptr = &regs->rfbptr0;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->cur_rx = rx_queue->rx_bd_base;
- rx_queue->skb_currx = 0;
- rxbdp = rx_queue->rx_bd_base;
- for (j = 0; j < rx_queue->rx_ring_size; j++) {
- struct sk_buff *skb = rx_queue->rx_skbuff[j];
+ rx_queue->next_to_clean = 0;
+ rx_queue->next_to_use = 0;
+ rx_queue->next_to_alloc = 0;
- if (skb) {
- bufaddr = be32_to_cpu(rxbdp->bufPtr);
- } else {
- skb = gfar_new_skb(ndev, &bufaddr);
- if (!skb) {
- netdev_err(ndev, "Can't allocate RX buffers\n");
- return -ENOMEM;
- }
- rx_queue->rx_skbuff[j] = skb;
- }
-
- gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
- rxbdp++;
- }
+ /* make sure next_to_clean != next_to_use after this
+ * by leaving at least 1 unused descriptor
+ */
+ gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
rx_queue->rfbptr = rfbptr;
rfbptr += 2;
}
-
- return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
void *vaddr;
dma_addr_t addr;
- int i, j, k;
+ int i, j;
struct gfar_private *priv = netdev_priv(ndev);
struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue = priv->rx_queue[i];
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
- rx_queue->dev = ndev;
+ rx_queue->ndev = ndev;
+ rx_queue->dev = dev;
addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
@@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
if (!tx_queue->tx_skbuff)
goto cleanup;
- for (k = 0; k < tx_queue->tx_ring_size; k++)
- tx_queue->tx_skbuff[k] = NULL;
+ for (j = 0; j < tx_queue->tx_ring_size; j++)
+ tx_queue->tx_skbuff[j] = NULL;
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_skbuff =
- kmalloc_array(rx_queue->rx_ring_size,
- sizeof(*rx_queue->rx_skbuff),
- GFP_KERNEL);
- if (!rx_queue->rx_skbuff)
+ rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_buff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_buff)
goto cleanup;
-
- for (j = 0; j < rx_queue->rx_ring_size; j++)
- rx_queue->rx_skbuff[j] = NULL;
}
- if (gfar_init_bds(ndev))
- goto cleanup;
+ gfar_init_bds(ndev);
return 0;
@@ -354,28 +333,16 @@ static void gfar_init_rqprm(struct gfar_private *priv)
}
}
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
{
- int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
priv->uses_rxfcb = 1;
- if (priv->hwts_rx_en)
+ if (priv->hwts_rx_en || priv->rx_filer_enable)
priv->uses_rxfcb = 1;
-
- if (priv->uses_rxfcb)
- frame_size += GMAC_FCB_LEN;
-
- frame_size += priv->padding;
-
- frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
-
- priv->rx_buffer_size = frame_size;
}
static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -384,7 +351,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
u32 rctrl = 0;
if (priv->rx_filer_enable) {
- rctrl |= RCTRL_FILREN;
+ rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
/* Program the RIR0 reg with the required distribution */
if (priv->poll_mode == GFAR_SQ_POLLING)
gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
@@ -593,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
if (!priv->rx_queue[i])
return -ENOMEM;
- priv->rx_queue[i]->rx_skbuff = NULL;
priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = priv->ndev;
+ priv->rx_queue[i]->ndev = priv->ndev;
}
return 0;
}
@@ -941,6 +907,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (of_find_property(np, "fsl,magic-packet", NULL))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
+ if (of_get_property(np, "fsl,wake-on-filer", NULL))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
+
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* In the case of a fixed PHY, the DT node associated
@@ -1187,12 +1156,11 @@ void gfar_mac_reset(struct gfar_private *priv)
udelay(3);
- /* Compute rx_buff_size based on config flags */
- gfar_rx_buff_size_config(priv);
+ gfar_rx_offload_en(priv);
/* Initialize the max receive frame/buffer lengths */
- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
+ gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+ gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
/* Initialize the Minimum Frame Length Register */
gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1200,12 +1168,11 @@ void gfar_mac_reset(struct gfar_private *priv)
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
- /* If the mtu is larger than the max size for standard
- * ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length
+ /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+ * are marked as truncated. Avoid this by setting MACCFG2[Huge Frame]=1,
+ * checking RxBD[LG], and discarding frames larger than MAXFRM.
*/
- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
+ if (gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
gfar_write(&regs->maccfg2, tempval);
@@ -1415,8 +1382,6 @@ static int gfar_probe(struct platform_device *ofdev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
- priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1453,8 +1418,14 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- device_set_wakeup_capable(&dev->dev, priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
+ priv->wol_supported |= GFAR_WOL_MAGIC;
+
+ if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
+ priv->rx_filer_enable)
+ priv->wol_supported |= GFAR_WOL_FILER_UCAST;
+
+ device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1517,15 +1488,122 @@ static int gfar_remove(struct platform_device *ofdev)
#ifdef CONFIG_PM
+static void __gfar_filer_disable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
+ gfar_write(&regs->rctrl, temp);
+}
+
+static void __gfar_filer_enable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 temp;
+
+ temp = gfar_read(&regs->rctrl);
+ temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
+ gfar_write(&regs->rctrl, temp);
+}
+
+/* Filer rules implementing wol capabilities */
+static void gfar_filer_config_wol(struct gfar_private *priv)
+{
+ unsigned int i;
+ u32 rqfcr;
+
+ __gfar_filer_disable(priv);
+
+ /* clear the filer table, reject any packet by default */
+ rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
+ for (i = 0; i <= MAX_FILER_IDX; i++)
+ gfar_write_filer(priv, i, rqfcr, 0);
+
+ i = 0;
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
+ /* unicast packet, accept it */
+ struct net_device *ndev = priv->ndev;
+ /* get the default rx queue index */
+ u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
+ u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
+ (ndev->dev_addr[1] << 8) |
+ ndev->dev_addr[2];
+
+ rqfcr = (qindex << 10) | RQFCR_AND |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAH;
+
+ gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+
+ dest_mac_addr = (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5];
+ rqfcr = (qindex << 10) | RQFCR_GPI |
+ RQFCR_CMP_EXACT | RQFCR_PID_DAL;
+ gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+ }
+
+ __gfar_filer_enable(priv);
+}
+
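
gfar_filer_config_wol() splits the station MAC into two 24-bit match values: the three high bytes feed the RQFCR_PID_DAH rule and the three low bytes the RQFCR_PID_DAL rule. The packing by itself (the MAC address is an example):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };

        /* high three bytes -> destination-address-high match value */
        uint32_t dah = (mac[0] << 16) | (mac[1] << 8) | mac[2];
        /* low three bytes -> destination-address-low match value */
        uint32_t dal = (mac[3] << 16) | (mac[4] << 8) | mac[5];

        printf("DAH match: 0x%06x\n", dah);
        printf("DAL match: 0x%06x\n", dal);
        return 0;
    }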
+static void gfar_filer_restore_table(struct gfar_private *priv)
+{
+ u32 rqfcr, rqfpr;
+ unsigned int i;
+
+ __gfar_filer_disable(priv);
+
+ for (i = 0; i <= MAX_FILER_IDX; i++) {
+ rqfcr = priv->ftp_rqfcr[i];
+ rqfpr = priv->ftp_rqfpr[i];
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+
+ __gfar_filer_enable(priv);
+}
+
+/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
+static void gfar_start_wol_filer(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+ /* Enable Rx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval &= ~DMACTRL_GRS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+ /* enable the Filer General Purpose Interrupt */
+ gfar_write(&regs->imask, IMASK_FGPI);
+ }
+
+ /* Enable Rx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= MACCFG1_RX_EN;
+ gfar_write(&regs->maccfg1, tempval);
+}
+
static int gfar_suspend(struct device *dev)
{
struct gfar_private *priv = dev_get_drvdata(dev);
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int magic_packet = priv->wol_en &&
- (priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ u16 wol = priv->wol_opts;
if (!netif_running(ndev))
return 0;
@@ -1537,7 +1615,7 @@ static int gfar_suspend(struct device *dev)
gfar_halt(priv);
- if (magic_packet) {
+ if (wol & GFAR_WOL_MAGIC) {
/* Enable interrupt on Magic Packet */
gfar_write(&regs->imask, IMASK_MAG);
@@ -1551,6 +1629,10 @@ static int gfar_suspend(struct device *dev)
tempval |= MACCFG1_RX_EN;
gfar_write(&regs->maccfg1, tempval);
+ } else if (wol & GFAR_WOL_FILER_UCAST) {
+ gfar_filer_config_wol(priv);
+ gfar_start_wol_filer(priv);
+
} else {
phy_stop(priv->phydev);
}
@@ -1564,18 +1646,22 @@ static int gfar_resume(struct device *dev)
struct net_device *ndev = priv->ndev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int magic_packet = priv->wol_en &&
- (priv->device_flags &
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ u16 wol = priv->wol_opts;
if (!netif_running(ndev))
return 0;
- if (magic_packet) {
+ if (wol & GFAR_WOL_MAGIC) {
/* Disable Magic Packet mode */
tempval = gfar_read(&regs->maccfg2);
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
+
+ } else if (wol & GFAR_WOL_FILER_UCAST) {
+ /* need to stop rx only, tx is already down */
+ gfar_halt(priv);
+ gfar_filer_restore_table(priv);
+
} else {
phy_start(priv->phydev);
}
@@ -1599,10 +1685,7 @@ static int gfar_restore(struct device *dev)
return 0;
}
- if (gfar_init_bds(ndev)) {
- free_skb_resources(priv);
- return -ENOMEM;
- }
+ gfar_init_bds(ndev);
gfar_mac_reset(priv);
@@ -1751,8 +1834,10 @@ static void gfar_configure_serdes(struct net_device *dev)
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
+ put_device(&tbiphy->dev);
return;
+ }
/* Single clk mode, mii mode off (for serdes communication) */
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
@@ -1764,6 +1849,8 @@ static void gfar_configure_serdes(struct net_device *dev)
phy_write(tbiphy, MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
BMCR_SPEED1000);
+
+ put_device(&tbiphy->dev);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
@@ -1893,26 +1980,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
- struct rxbd8 *rxbdp;
- struct gfar_private *priv = netdev_priv(rx_queue->dev);
int i;
- rxbdp = rx_queue->rx_bd_base;
+ struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+ if (rx_queue->skb)
+ dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) {
- if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
- priv->rx_buffer_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
- rx_queue->rx_skbuff[i] = NULL;
- }
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
rxbdp->lstatus = 0;
rxbdp->bufPtr = 0;
rxbdp++;
+
+ if (!rxb->page)
+ continue;
+
+ dma_unmap_single(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+
+ rxb->page = NULL;
}
- kfree(rx_queue->rx_skbuff);
- rx_queue->rx_skbuff = NULL;
+
+ kfree(rx_queue->rx_buff);
+ rx_queue->rx_buff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
@@ -1937,7 +2030,7 @@ static void free_skb_resources(struct gfar_private *priv)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if (rx_queue->rx_skbuff)
+ if (rx_queue->rx_buff)
free_skb_rx_queue(rx_queue);
}
@@ -2005,8 +2098,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
- IRQF_NO_SUSPEND,
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
gfar_irq(grp, ER)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2014,6 +2106,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
goto err_irq_fail;
}
+ enable_irq_wake(gfar_irq(grp, ER)->irq);
+
err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
@@ -2028,15 +2122,17 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
gfar_irq(grp, RX)->irq);
goto rx_irq_fail;
}
+ enable_irq_wake(gfar_irq(grp, RX)->irq);
+
} else {
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
- IRQF_NO_SUSPEND,
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
gfar_irq(grp, TX)->irq);
goto err_irq_fail;
}
+ enable_irq_wake(gfar_irq(grp, TX)->irq);
}
return 0;
@@ -2102,6 +2198,11 @@ int startup_gfar(struct net_device *ndev)
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
phy_start(priv->phydev);
enable_napi(priv);
@@ -2495,7 +2596,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
struct gfar_private *priv = netdev_priv(dev);
int frame_size = new_mtu + ETH_HLEN;
- if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
@@ -2549,15 +2650,6 @@ static void gfar_timeout(struct net_device *dev)
schedule_work(&priv->reset_task);
}
-static void gfar_align_skb(struct sk_buff *skb)
-{
- /* We need the data buffer to be aligned properly. We will reserve
- * as many bytes as needed to align the data properly
- */
- skb_reserve(skb, RXBUF_ALIGNMENT -
- (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
@@ -2615,7 +2707,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
- u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+ u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+ ~0x7UL);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@ -2664,49 +2757,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb;
+ struct page *page;
+ dma_addr_t addr;
- skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
- if (!skb)
- return NULL;
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
- gfar_align_skb(skb);
+ addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+ __free_page(page);
- return skb;
+ return false;
+ }
+
+ rxb->dma = addr;
+ rxb->page = page;
+ rxb->page_offset = 0;
+
+ return true;
}
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct sk_buff *skb;
- dma_addr_t addr;
+ struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+ struct gfar_extra_stats *estats = &priv->extra_stats;
- skb = gfar_alloc_skb(dev);
- if (!skb)
- return NULL;
+ netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+ atomic64_inc(&estats->rx_alloc_err);
+}
- addr = dma_map_single(priv->dev, skb->data,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, addr))) {
- dev_kfree_skb_any(skb);
- return NULL;
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+ int alloc_cnt)
+{
+ struct rxbd8 *bdp;
+ struct gfar_rx_buff *rxb;
+ int i;
+
+ i = rx_queue->next_to_use;
+ bdp = &rx_queue->rx_bd_base[i];
+ rxb = &rx_queue->rx_buff[i];
+
+ while (alloc_cnt--) {
+ /* try reuse page */
+ if (unlikely(!rxb->page)) {
+ if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+ gfar_rx_alloc_err(rx_queue);
+ break;
+ }
+ }
+
+ /* Setup the new RxBD */
+ gfar_init_rxbdp(rx_queue, bdp,
+ rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+ /* Update to the next pointer */
+ bdp++;
+ rxb++;
+
+ if (unlikely(++i == rx_queue->rx_ring_size)) {
+ i = 0;
+ bdp = rx_queue->rx_bd_base;
+ rxb = rx_queue->rx_buff;
+ }
}
- *bufaddr = addr;
- return skb;
+ rx_queue->next_to_use = i;
+ rx_queue->next_to_alloc = i;
}
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *ndev)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
/* If the packet was truncated, none of the other errors matter */
- if (status & RXBD_TRUNCATED) {
+ if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
stats->rx_length_errors++;
atomic64_inc(&estats->rx_trunc);
@@ -2714,25 +2843,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
return;
}
/* Count the errors, if there were any */
- if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
stats->rx_length_errors++;
- if (status & RXBD_LARGE)
+ if (lstatus & BD_LFLAG(RXBD_LARGE))
atomic64_inc(&estats->rx_large);
else
atomic64_inc(&estats->rx_short);
}
- if (status & RXBD_NONOCTET) {
+ if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
stats->rx_frame_errors++;
atomic64_inc(&estats->rx_nonoctet);
}
- if (status & RXBD_CRCERR) {
+ if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
atomic64_inc(&estats->rx_crcerr);
stats->rx_crc_errors++;
}
- if (status & RXBD_OVERRUN) {
+ if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
atomic64_inc(&estats->rx_overrun);
- stats->rx_crc_errors++;
+ stats->rx_over_errors++;
}
}
@@ -2740,7 +2869,14 @@ irqreturn_t gfar_receive(int irq, void *grp_id)
{
struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
unsigned long flags;
- u32 imask;
+ u32 imask, ievent;
+
+ ievent = gfar_read(&grp->regs->ievent);
+
+ if (unlikely(ievent & IEVENT_FGPI)) {
+ gfar_write(&grp->regs->ievent, IEVENT_FGPI);
+ return IRQ_HANDLED;
+ }
if (likely(napi_schedule_prep(&grp->napi_rx))) {
spin_lock_irqsave(&grp->grplock, flags);
@@ -2783,6 +2919,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
return IRQ_HANDLED;
}
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+ struct sk_buff *skb, bool first)
+{
+ unsigned int size = lstatus & BD_LENGTH_MASK;
+ struct page *page = rxb->page;
+
+ /* Remove the FCS from the packet length */
+ if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+ size -= ETH_FCS_LEN;
+
+ if (likely(first))
+ skb_put(skb, size);
+ else
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+
+ /* try reuse page */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* change offset to the other half */
+ rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+ atomic_inc(&page->_count);
+
+ return true;
+}
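The reuse path above works because each page is carved into two GFAR_RXB_TRUESIZE halves, so toggling one offset bit selects the other half, and a half may only be recycled while the driver holds the sole page reference. A sketch of that bookkeeping, assuming 4 KiB pages and illustrative names:

	#include <stdbool.h>
	#include <stdint.h>

	#define HALF_PAGE 2048	/* mirrors GFAR_RXB_TRUESIZE */

	/* Flip between the two buffer halves of one page: 0 <-> 2048 */
	static uint32_t flip_buffer_half(uint32_t page_offset)
	{
		return page_offset ^ HALF_PAGE;
	}

	/* A half can go back to the ring only while no one else holds
	 * a reference to the page; otherwise the stack still owns it.
	 */
	static bool can_reuse_page(int page_refcount)
	{
		return page_refcount == 1;
	}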
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+ struct gfar_rx_buff *old_rxb)
+{
+ struct gfar_rx_buff *new_rxb;
+ u16 nta = rxq->next_to_alloc;
+
+ new_rxb = &rxq->rx_buff[nta];
+
+ /* find next buf that can reuse a page */
+ nta++;
+ rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+ /* copy page reference */
+ *new_rxb = *old_rxb;
+
+ /* sync for use by the device */
+ dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+ old_rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+ u32 lstatus, struct sk_buff *skb)
+{
+ struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+ struct page *page = rxb->page;
+ bool first = false;
+
+ if (likely(!skb)) {
+ void *buff_addr = page_address(page) + rxb->page_offset;
+
+ skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+ if (unlikely(!skb)) {
+ gfar_rx_alloc_err(rx_queue);
+ return NULL;
+ }
+ skb_reserve(skb, RXBUF_ALIGNMENT);
+ first = true;
+ }
+
+ dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+ GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+ if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+ /* reuse the free half of the page */
+ gfar_reuse_rx_page(rx_queue, rxb);
+ } else {
+ /* page cannot be reused, unmap it */
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear rxb content */
+ rxb->page = NULL;
+
+ return skb;
+}
+
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
@@ -2797,10 +3020,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
}
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(ndev);
struct rxfcb *fcb = NULL;
/* fcb is at the beginning if exists */
@@ -2809,10 +3031,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Remove the FCB from the skb
* Remove the padded bytes, if there are any
*/
- if (amount_pull) {
- skb_record_rx_queue(skb, fcb->rq);
- skb_pull(skb, amount_pull);
- }
+ if (priv->uses_rxfcb)
+ skb_pull(skb, GMAC_FCB_LEN);
/* Get receive timestamp from the skb */
if (priv->hwts_rx_en) {
@@ -2826,24 +3046,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->padding)
skb_pull(skb, priv->padding);
- if (dev->features & NETIF_F_RXCSUM)
+ if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
/* Tell the skb what kind of packet this is */
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
/* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 * Even if vlan rx accel is disabled, on some chips
 * RXFCB_VLN is pseudo-randomly set.
*/
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
be16_to_cpu(fcb->flags) & RXFCB_VLN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(fcb->vlctl));
-
- /* Send the packet up the stack */
- napi_gro_receive(napi, skb);
-
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2852,91 +3068,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
- struct net_device *dev = rx_queue->dev;
- struct rxbd8 *bdp, *base;
- struct sk_buff *skb;
- int pkt_len;
- int amount_pull;
- int howmany = 0;
- struct gfar_private *priv = netdev_priv(dev);
+ struct net_device *ndev = rx_queue->ndev;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct rxbd8 *bdp;
+ int i, howmany = 0;
+ struct sk_buff *skb = rx_queue->skb;
+ int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+ unsigned int total_bytes = 0, total_pkts = 0;
/* Get the first full descriptor */
- bdp = rx_queue->cur_rx;
- base = rx_queue->rx_bd_base;
+ i = rx_queue->next_to_clean;
+
+ while (rx_work_limit--) {
+ u32 lstatus;
- amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+ if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+ cleaned_cnt = 0;
+ }
- while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
- struct sk_buff *newskb;
- dma_addr_t bufaddr;
+ bdp = &rx_queue->rx_bd_base[i];
+ lstatus = be32_to_cpu(bdp->lstatus);
+ if (lstatus & BD_LFLAG(RXBD_EMPTY))
+ break;
+ /* order rx buffer descriptor reads */
rmb();
- /* Add another skb for the future */
- newskb = gfar_new_skb(dev, &bufaddr);
+ /* fetch next to clean buffer from the ring */
+ skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+ if (unlikely(!skb))
+ break;
- skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+ cleaned_cnt++;
+ howmany++;
- dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
- priv->rx_buffer_size, DMA_FROM_DEVICE);
-
- if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
- be16_to_cpu(bdp->length) > priv->rx_buffer_size))
- bdp->status = cpu_to_be16(RXBD_LARGE);
-
- /* We drop the frame if we failed to allocate a new buffer */
- if (unlikely(!newskb ||
- !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
- be16_to_cpu(bdp->status) & RXBD_ERR)) {
- count_errors(be16_to_cpu(bdp->status), dev);
-
- if (unlikely(!newskb)) {
- newskb = skb;
- bufaddr = be32_to_cpu(bdp->bufPtr);
- } else if (skb)
- dev_kfree_skb(skb);
- } else {
- /* Increment the number of packets */
- rx_queue->stats.rx_packets++;
- howmany++;
-
- if (likely(skb)) {
- pkt_len = be16_to_cpu(bdp->length) -
- ETH_FCS_LEN;
- /* Remove the FCS from the packet length */
- skb_put(skb, pkt_len);
- rx_queue->stats.rx_bytes += pkt_len;
- skb_record_rx_queue(skb, rx_queue->qindex);
- gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi_rx);
+ if (unlikely(++i == rx_queue->rx_ring_size))
+ i = 0;
- } else {
- netif_warn(priv, rx_err, dev, "Missing skb!\n");
- rx_queue->stats.rx_dropped++;
- atomic64_inc(&priv->extra_stats.rx_skbmissing);
- }
+ rx_queue->next_to_clean = i;
+
+ /* fetch next buffer if not the last in frame */
+ if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+ continue;
+
+ if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+ count_errors(lstatus, ndev);
+ /* discard faulty buffer */
+ dev_kfree_skb(skb);
+ skb = NULL;
+ rx_queue->stats.rx_dropped++;
+ continue;
}
- rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+ /* Increment the number of packets */
+ total_pkts++;
+ total_bytes += skb->len;
- /* Setup the new bdp */
- gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+ skb_record_rx_queue(skb, rx_queue->qindex);
- /* Update Last Free RxBD pointer for LFC */
- if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
- gfar_write(rx_queue->rfbptr, (u32)bdp);
+ gfar_process_frame(ndev, skb);
- /* Update to the next pointer */
- bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+ /* Send the packet up the stack */
+ napi_gro_receive(&rx_queue->grp->napi_rx, skb);
- /* update to point at the next skb */
- rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
- RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ skb = NULL;
}
- /* Update the current rxbd pointer to be the next one */
- rx_queue->cur_rx = bdp;
+ /* Store incomplete frames for completion */
+ rx_queue->skb = skb;
+
+ rx_queue->stats.rx_packets += total_pkts;
+ rx_queue->stats.rx_bytes += total_bytes;
+
+ if (cleaned_cnt)
+ gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+ /* Update Last Free RxBD pointer for LFC */
+ if (unlikely(priv->tx_actual_en)) {
+ u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+ gfar_write(rx_queue->rfbptr, bdp_dma);
+ }
return howmany;
}
@@ -3381,11 +3595,9 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
if (events & IEVENT_BSY) {
- dev->stats.rx_errors++;
+ dev->stats.rx_over_errors++;
atomic64_inc(&priv->extra_stats.rx_bsy);
- gfar_receive(irq, grp_id);
-
netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
gfar_read(&regs->rstat));
}
@@ -3454,7 +3666,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
struct phy_device *phydev = priv->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
- struct rxbd8 *bdp;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
return;
@@ -3511,15 +3722,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
/* Turn last free buffer recording on */
if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
for (i = 0; i < priv->num_rx_queues; i++) {
+ u32 bdp_dma;
+
rx_queue = priv->rx_queue[i];
- bdp = rx_queue->cur_rx;
- /* skip to previous bd */
- bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
- rx_queue->rx_bd_base,
- rx_queue->rx_ring_size);
-
- if (rx_queue->rfbptr)
- gfar_write(rx_queue->rfbptr, (u32)bdp);
+ bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+ gfar_write(rx_queue->rfbptr, bdp_dma);
}
priv->tx_actual_en = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 5545e4103368..f266b20f9ef5 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -71,11 +71,6 @@ struct ethtool_rx_list {
/* Number of bytes to align the rx bufs to */
#define RXBUF_ALIGNMENT 64
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers. ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
#define PHY_INIT_TIMEOUT 100000
#define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_TX_RING_SIZE 256
#define DEFAULT_RX_RING_SIZE 256
+#define GFAR_RX_BUFF_ALLOC 16
+
#define GFAR_RX_MAX_RING_SIZE 256
#define GFAR_TX_MAX_RING_SIZE 256
@@ -103,11 +100,14 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-#define DEFAULT_RX_BUFFER_SIZE 1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
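Sizing note for the macros above: a build_skb() buffer must cover the RXBUF_ALIGNMENT headroom, the GFAR_RXB_SIZE frame data and the skb_shared_info tail. Assuming a shared-info footprint of roughly 320 bytes (it varies by architecture and kernel version), GFAR_SKBFRAG_SIZE comes to about 64 + 1536 + 320 = 1920 bytes, which fits within one GFAR_RXB_TRUESIZE (2048-byte) half page.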
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
#define DEFAULT_FIFO_TX_THR 0x100
#define DEFAULT_FIFO_TX_STARVE 0x40
@@ -340,6 +340,7 @@ extern const char gfar_driver_version[];
#define IEVENT_MAG 0x00000800
#define IEVENT_GRSC 0x00000100
#define IEVENT_RXF0 0x00000080
+#define IEVENT_FGPI 0x00000010
#define IEVENT_FIR 0x00000008
#define IEVENT_FIQ 0x00000004
#define IEVENT_DPE 0x00000002
@@ -372,6 +373,7 @@ extern const char gfar_driver_version[];
#define IMASK_MAG 0x00000800
#define IMASK_GRSC 0x00000100
#define IMASK_RXFEN0 0x00000080
+#define IMASK_FGPI 0x00000010
#define IMASK_FIR 0x00000008
#define IMASK_FIQ 0x00000004
#define IMASK_DPE 0x00000002
@@ -540,6 +542,9 @@ extern const char gfar_driver_version[];
#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
+#define GFAR_WOL_MAGIC 0x00000001
+#define GFAR_WOL_FILER_UCAST 0x00000002
+
struct txbd8
{
union {
@@ -640,6 +645,7 @@ struct rmon_mib
};
struct gfar_extra_stats {
+ atomic64_t rx_alloc_err;
atomic64_t rx_large;
atomic64_t rx_short;
atomic64_t rx_nonoctet;
@@ -651,7 +657,6 @@ struct gfar_extra_stats {
atomic64_t eberr;
atomic64_t tx_babt;
atomic64_t tx_underrun;
- atomic64_t rx_skbmissing;
atomic64_t tx_timeout;
};
@@ -917,6 +922,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
+#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
@@ -1012,34 +1018,42 @@ struct rx_q_stats {
unsigned long rx_dropped;
};
+struct gfar_rx_buff {
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+};
+
/**
* struct gfar_priv_rx_q - per rx queue structure
- * @rx_skbuff: skb pointers
- * @skb_currx: currently use skb pointer
+ * @rx_buff: Array of buffer info metadata structs
* @rx_bd_base: First rx buffer descriptor
- * @cur_rx: Next free rx ring entry
+ * @next_to_use: index of the next buffer to be alloc'd
+ * @next_to_clean: index of the next buffer to be cleaned
* @qindex: index of this queue
- * @dev: back pointer to the dev structure
+ * @ndev: back pointer to net_device
* @rx_ring_size: Rx ring size
* @rxcoalescing: enable/disable rx-coalescing
* @rxic: receive interrupt coalescing value
*/
struct gfar_priv_rx_q {
- struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
- dma_addr_t rx_bd_dma_base;
+ struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
struct rxbd8 *rx_bd_base;
- struct rxbd8 *cur_rx;
- struct net_device *dev;
- struct gfar_priv_grp *grp;
+ struct net_device *ndev;
+ struct device *dev;
+ u16 rx_ring_size;
+ u16 qindex;
+ struct gfar_priv_grp *grp;
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+ struct sk_buff *skb;
struct rx_q_stats stats;
- u16 skb_currx;
- u16 qindex;
- unsigned int rx_ring_size;
- /* RX Coalescing values */
+ u32 __iomem *rfbptr;
unsigned char rxcoalescing;
unsigned long rxic;
- u32 __iomem *rfbptr;
+ dma_addr_t rx_bd_dma_base;
};
enum gfar_irqinfo_id {
@@ -1109,7 +1123,6 @@ struct gfar_private {
struct device *dev;
struct net_device *ndev;
enum gfar_errata errata;
- unsigned int rx_buffer_size;
u16 uses_rxfcb;
u16 padding;
@@ -1154,8 +1167,6 @@ struct gfar_private {
extended_hash:1,
bd_stash_en:1,
rx_filer_enable:1,
- /* Wake-on-LAN enabled */
- wol_en:1,
/* Enable priority-based Tx scheduling in HW */
prio_sched_en:1,
/* Flow control flags */
@@ -1184,6 +1195,10 @@ struct gfar_private {
u32 __iomem *hash_regs[16];
int hash_width;
+ /* wake-on-lan settings */
+ u16 wol_opts;
+ u16 wol_supported;
+
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1292,6 +1307,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
bdp->lstatus = cpu_to_be32(lstatus);
}
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+ if (rxq->next_to_clean > rxq->next_to_use)
+ return rxq->next_to_clean - rxq->next_to_use - 1;
+
+ return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
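Worked example for the helper above, with illustrative values: rx_ring_size = 256, next_to_clean = 10, next_to_use = 200. Since next_to_clean is not greater than next_to_use, the second branch applies and 256 + 10 - 200 - 1 = 65 descriptors may be refilled; the trailing -1 keeps one slot permanently unused so that a full ring remains distinguishable from an empty one.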
+
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+ struct rxbd8 *bdp;
+ u32 bdp_dma;
+ int i;
+
+ i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+ bdp = &rxq->rx_bd_base[i];
+ bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+ bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+ return bdp_dma;
+}
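Equivalently, the address returned above is the ring's base DMA address plus the index of the descriptor before next_to_use times the descriptor size; a plain-C sketch under that assumption:

	#include <stddef.h>
	#include <stdint.h>

	/* DMA address of the last free (most recently refilled) BD,
	 * i.e. the descriptor just before next_to_use, with wraparound.
	 */
	static uint32_t bd_dma_lastfree(uint32_t bd_dma_base,
					uint16_t next_to_use,
					uint16_t ring_size, size_t bd_size)
	{
		uint16_t i = next_to_use ? next_to_use - 1 : ring_size - 1;

		return bd_dma_base + (uint32_t)(i * bd_size);
	}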
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3c0a8f825b63..4b0ee855edd7 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+ /* extra stats */
+ "rx-allocation-errors",
"rx-large-frame-errors",
"rx-short-frame-errors",
"rx-non-octet-errors",
@@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"ethernet-bus-error",
"tx-babbling-errors",
"tx-underrun-errors",
- "rx-skb-missing-errors",
"tx-timeout-errors",
+ /* rmon stats */
"tx-rx-64-frames",
"tx-rx-65-127-frames",
"tx-rx-128-255-frames",
@@ -180,8 +182,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
@@ -642,28 +642,49 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
- wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
- } else {
- wol->supported = wol->wolopts = 0;
- }
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (priv->wol_supported & GFAR_WOL_MAGIC)
+ wol->supported |= WAKE_MAGIC;
+
+ if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
+ wol->supported |= WAKE_UCAST;
+
+ if (priv->wol_opts & GFAR_WOL_MAGIC)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
+ wol->wolopts |= WAKE_UCAST;
}
static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct gfar_private *priv = netdev_priv(dev);
+ u16 wol_opts = 0;
+ int err;
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
- wol->wolopts != 0)
+ if (!priv->wol_supported && wol->wolopts)
return -EINVAL;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
return -EINVAL;
- device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+ if (wol->wolopts & WAKE_MAGIC) {
+ wol_opts |= GFAR_WOL_MAGIC;
+ } else {
+ if (wol->wolopts & WAKE_UCAST)
+ wol_opts |= GFAR_WOL_FILER_UCAST;
+ }
- priv->wol_en = !!device_may_wakeup(&dev->dev);
+ wol_opts &= priv->wol_supported;
+ priv->wol_opts = 0;
+
+ err = device_set_wakeup_enable(priv->dev, wol_opts);
+ if (err)
+ return err;
+
+ priv->wol_opts = wol_opts;
return 0;
}
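Usage note: WAKE_MAGIC deliberately takes precedence over WAKE_UCAST when both are requested. From userspace the two modes map to the standard ethtool wake-on letters (the interface name below is illustrative):

	ethtool -s eth0 wol g	# WAKE_MAGIC  -> GFAR_WOL_MAGIC
	ethtool -s eth0 wol u	# WAKE_UCAST  -> GFAR_WOL_FILER_UCAST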
@@ -674,14 +695,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
u32 fcr = 0x0, fpr = FPR_FILER_MASK;
if (ethflow & RXH_L2DA) {
- fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
+ fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
- fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+ fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
@@ -900,27 +921,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
return 0;
}
-static int gfar_comp_asc(const void *a, const void *b)
-{
- return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
- return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
- u32 *_a = a;
- u32 *_b = b;
-
- swap(_a[0], _b[0]);
- swap(_a[1], _b[1]);
- swap(_a[2], _b[2]);
- swap(_a[3], _b[3]);
-}
-
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
@@ -1270,310 +1270,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
return 0;
}
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
-{
- while (size > 0) {
- size--;
- dst[size].ctrl = src[size].ctrl;
- dst[size].prop = src[size].prop;
- }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
- int length;
-
- if (end > MAX_FILER_CACHE_IDX || end < begin)
- return -EINVAL;
-
- end++;
- length = end - begin;
-
- /* Copy */
- while (end < tab->index) {
- tab->fe[begin].ctrl = tab->fe[end].ctrl;
- tab->fe[begin++].prop = tab->fe[end++].prop;
-
- }
- /* Fill up with don't cares */
- while (begin < tab->index) {
- tab->fe[begin].ctrl = 0x60;
- tab->fe[begin].prop = 0xFFFFFFFF;
- begin++;
- }
-
- tab->index -= length;
- return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
-{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
- begin > MAX_FILER_CACHE_IDX)
- return -EINVAL;
-
- gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
-
- tab->index += length;
- return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_AND | RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-/* Uses hardwares clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
- s32 i = -1, j, iend, jend;
-
- while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
- j = i;
- while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /* The cluster entries self and the previous one
- * (a mask) must be identical!
- */
- if (tab->fe[i].ctrl != tab->fe[j].ctrl)
- break;
- if (tab->fe[i].prop != tab->fe[j].prop)
- break;
- if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
- break;
- if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
- break;
- iend = gfar_get_next_cluster_end(i, tab);
- jend = gfar_get_next_cluster_end(j, tab);
- if (jend == -1 || iend == -1)
- break;
-
- /* First we make some free space, where our cluster
- * element should be. Then we copy it there and finally
- * delete in from its old location.
- */
- if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
- -EINVAL)
- break;
-
- gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
-
- if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j),
- tab) == -EINVAL)
- return;
-
- /* Mask out cluster bit */
- tab->fe[iend].ctrl &= ~(RQFCR_CLE);
- }
- }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2,
- struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
-{
- u32 temp[4];
- temp[0] = a1->ctrl & mask;
- temp[1] = a2->ctrl & mask;
- temp[2] = b1->ctrl & mask;
- temp[3] = b2->ctrl & mask;
-
- a1->ctrl &= ~mask;
- a2->ctrl &= ~mask;
- b1->ctrl &= ~mask;
- b2->ctrl &= ~mask;
-
- a1->ctrl |= temp[1];
- a2->ctrl |= temp[0];
- b1->ctrl |= temp[3];
- b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
-{
- u32 i, and_index = 0, block_index = 1;
-
- for (i = 0; i < tab->index; i++) {
-
- /* LSByte of control = 0 sets a mask */
- if (!(tab->fe[i].ctrl & 0xF)) {
- mask_table[and_index].mask = tab->fe[i].prop;
- mask_table[and_index].start = i;
- mask_table[and_index].block = block_index;
- if (and_index >= 1)
- mask_table[and_index - 1].end = i - 1;
- and_index++;
- }
- /* cluster starts and ends will be separated because they should
- * hold their position
- */
- if (tab->fe[i].ctrl & RQFCR_CLE)
- block_index++;
- /* A not set AND indicates the end of a depended block */
- if (!(tab->fe[i].ctrl & RQFCR_AND))
- block_index++;
- }
-
- mask_table[and_index - 1].end = i - 1;
-
- return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
-{
- /* Pointer to compare function (_asc or _desc) */
- int (*gfar_comp)(const void *, const void *);
-
- u32 i, size = 0, start = 0, prev = 1;
- u32 old_first, old_last, new_first, new_last;
-
- gfar_comp = &gfar_comp_desc;
-
- for (i = 0; i < and_index; i++) {
- if (prev != mask_table[i].block) {
- old_first = mask_table[start].start + 1;
- old_last = mask_table[i - 1].end;
- sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
-
- /* Toggle order for every block. This makes the
- * thing more efficient!
- */
- if (gfar_comp == gfar_comp_desc)
- gfar_comp = &gfar_comp_asc;
- else
- gfar_comp = &gfar_comp_desc;
-
- new_first = mask_table[start].start + 1;
- new_last = mask_table[i - 1].end;
-
- gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND);
-
- start = i;
- size = 0;
- }
- size++;
- prev = mask_table[i].block;
- }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a depended block. A depended block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
- struct filer_table *temp_table;
- struct gfar_mask_entry *mask_table;
-
- u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
- s32 ret = 0;
-
- /* We need a copy of the filer table because
- * we want to change its order
- */
- temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
- if (temp_table == NULL)
- return -ENOMEM;
-
- mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
- if (mask_table == NULL) {
- ret = -ENOMEM;
- goto end;
- }
-
- and_index = gfar_generate_mask_table(mask_table, tab);
-
- gfar_sort_mask_table(mask_table, temp_table, and_index);
-
- /* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says
- */
- for (i = 0; i < and_index; i++) {
- size = mask_table[i].end - mask_table[i].start + 1;
- gfar_copy_filer_entries(&(tab->fe[j]),
- &(temp_table->fe[mask_table[i].start]), size);
- j += size;
- }
-
- /* And finally we just have to check for duplicated masks and drop the
- * second ones
- */
- for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- previous_mask = i++;
- break;
- }
- }
- for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
- /* Two identical ones found!
- * So drop the second one!
- */
- gfar_trim_filer_entries(i, i, tab);
- } else
- /* Not identical! */
- previous_mask = i;
- }
- }
-
- kfree(mask_table);
-end: kfree(temp_table);
- return ret;
-}
-
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
struct filer_table *tab)
@@ -1583,11 +1279,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
- i++)
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
- for (; i < MAX_FILER_IDX - 1; i++)
+ for (; i < MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
* because that's what people expect
@@ -1621,7 +1316,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
{
struct ethtool_flow_spec_container *j;
struct filer_table *tab;
- s32 i = 0;
s32 ret = 0;
/* So index is set to zero, too! */
@@ -1646,17 +1340,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
}
}
- i = tab->index;
-
- /* Optimizations to save entries */
- gfar_cluster_filer(tab);
- gfar_optimize_filer_masks(tab);
-
- pr_debug("\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
-
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
if (ret == -EBUSY) {
@@ -1722,13 +1405,14 @@ static int gfar_add_cls(struct gfar_private *priv,
}
process:
+ priv->rx_list.count++;
ret = gfar_process_filer_changes(priv);
if (ret)
goto clean_list;
- priv->rx_list.count++;
return ret;
clean_list:
+ priv->rx_list.count--;
list_del(&temp->list);
clean_mem:
kfree(temp);
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 8e3cd77aa347..664d0c261269 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = {
{ .compatible = "fsl,etsec-ptp" },
{},
};
+MODULE_DEVICE_TABLE(of, match_table);
static struct platform_driver gianfar_ptp_driver = {
.driver = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4dd40e057f40..650f7888e32b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->dev);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev)
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
- if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
+ put_device(&tbiphy->dev);
return;
+ }
/* Single clk mode, mii mode off (for serdes communication) */
phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
@@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev)
phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+
+ put_device(&tbiphy->dev);
}
/* Configure the PHY for dev.
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index cc83350d56ba..89714f5e0dfc 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -351,8 +351,6 @@ uec_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
- drvinfo->eedump_len = 0;
- drvinfo->regdump_len = uec_get_regs_len(netdev);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index dead17b5d769..f250dec488fd 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
- depends on ARM
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -24,11 +24,42 @@ config HIX5HD2_GMAC
config HIP04_ETH
tristate "HISILICON P04 Ethernet support"
- select PHYLIB
select MARVELL_PHY
select MFD_SYSCON
+ select HNS_MDIO
---help---
If you wish to compile a kernel for hardware with a Hisilicon P04 SoC and
want to use the internal ethernet, then you should answer Y here.
+config HNS_MDIO
+ tristate
+ select PHYLIB
+ ---help---
+ This selects the HNS MDIO support. It is needed by HNS_DSAF to access
+ the PHY.
+
+config HNS
+ tristate "Hisilicon Network Subsystem Support (Framework)"
+ ---help---
+ This selects the framework support for the Hisilicon Network Subsystem. It
+ is needed by any driver which provides an HNS acceleration engine or makes
+ use of the engine.
+
+config HNS_DSAF
+ tristate "Hisilicon HNS DSAF device Support"
+ select HNS
+ select HNS_MDIO
+ ---help---
+ This selects the DSAF (Distributed System Area Fabric) network
+ acceleration engine support. The engine is used in the Hisilicon hip05,
+ Hi1610 and later ICT SoCs.
+
+config HNS_ENET
+ tristate "Hisilicon HNS Ethernet Device Support"
+ select PHYLIB
+ select HNS
+ ---help---
+ This selects the general ethernet driver for HNS. This module makes
+ use of any HNS AE driver, such as HNS_DSAF.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 6c14540a4dc5..390b71fb3000 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -3,4 +3,6 @@
#
obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
-obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
+obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
+obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
+obj-$(CONFIG_HNS) += hns/
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d49bee38cd31..253f8ed0537a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hip04_priv *priv;
struct resource *res;
- unsigned int irq;
+ int irq;
int ret;
ndev = alloc_etherdev(sizeof(struct hip04_priv));
@@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = {
.remove = hip04_remove,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = hip04_mac_match,
},
};
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
deleted file mode 100644
index b3bac25db99c..000000000000
--- a/drivers/net/ethernet/hisilicon/hip04_mdio.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-
-#define MDIO_CMD_REG 0x0
-#define MDIO_ADDR_REG 0x4
-#define MDIO_WDATA_REG 0x8
-#define MDIO_RDATA_REG 0xc
-#define MDIO_STA_REG 0x10
-
-#define MDIO_START BIT(14)
-#define MDIO_R_VALID BIT(1)
-#define MDIO_READ (BIT(12) | BIT(11) | MDIO_START)
-#define MDIO_WRITE (BIT(12) | BIT(10) | MDIO_START)
-
-struct hip04_mdio_priv {
- void __iomem *base;
-};
-
-#define WAIT_TIMEOUT 10
-static int hip04_mdio_wait_ready(struct mii_bus *bus)
-{
- struct hip04_mdio_priv *priv = bus->priv;
- int i;
-
- for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
- if (i == WAIT_TIMEOUT)
- return -ETIMEDOUT;
- msleep(20);
- }
-
- return 0;
-}
-
-static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct hip04_mdio_priv *priv = bus->priv;
- u32 val;
- int ret;
-
- ret = hip04_mdio_wait_ready(bus);
- if (ret < 0)
- goto out;
-
- val = regnum | (mii_id << 5) | MDIO_READ;
- writel_relaxed(val, priv->base + MDIO_CMD_REG);
-
- ret = hip04_mdio_wait_ready(bus);
- if (ret < 0)
- goto out;
-
- val = readl_relaxed(priv->base + MDIO_STA_REG);
- if (val & MDIO_R_VALID) {
- dev_err(bus->parent, "SMI bus read not valid\n");
- ret = -ENODEV;
- goto out;
- }
-
- val = readl_relaxed(priv->base + MDIO_RDATA_REG);
- ret = val & 0xFFFF;
-out:
- return ret;
-}
-
-static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
- int regnum, u16 value)
-{
- struct hip04_mdio_priv *priv = bus->priv;
- u32 val;
- int ret;
-
- ret = hip04_mdio_wait_ready(bus);
- if (ret < 0)
- goto out;
-
- writel_relaxed(value, priv->base + MDIO_WDATA_REG);
- val = regnum | (mii_id << 5) | MDIO_WRITE;
- writel_relaxed(val, priv->base + MDIO_CMD_REG);
-out:
- return ret;
-}
-
-static int hip04_mdio_reset(struct mii_bus *bus)
-{
- int temp, i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- hip04_mdio_write(bus, i, 22, 0);
- temp = hip04_mdio_read(bus, i, MII_BMCR);
- if (temp < 0)
- continue;
-
- temp |= BMCR_RESET;
- if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
- continue;
- }
-
- mdelay(500);
- return 0;
-}
-
-static int hip04_mdio_probe(struct platform_device *pdev)
-{
- struct resource *r;
- struct mii_bus *bus;
- struct hip04_mdio_priv *priv;
- int ret;
-
- bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
- if (!bus) {
- dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
- return -ENOMEM;
- }
-
- bus->name = "hip04_mdio_bus";
- bus->read = hip04_mdio_read;
- bus->write = hip04_mdio_write;
- bus->reset = hip04_mdio_reset;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
- bus->parent = &pdev->dev;
- priv = bus->priv;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(priv->base)) {
- ret = PTR_ERR(priv->base);
- goto out_mdio;
- }
-
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
- goto out_mdio;
- }
-
- platform_set_drvdata(pdev, bus);
-
- return 0;
-
-out_mdio:
- mdiobus_free(bus);
- return ret;
-}
-
-static int hip04_mdio_remove(struct platform_device *pdev)
-{
- struct mii_bus *bus = platform_get_drvdata(pdev);
-
- mdiobus_unregister(bus);
- mdiobus_free(bus);
-
- return 0;
-}
-
-static const struct of_device_id hip04_mdio_match[] = {
- { .compatible = "hisilicon,hip04-mdio" },
- { }
-};
-MODULE_DEVICE_TABLE(of, hip04_mdio_match);
-
-static struct platform_driver hip04_mdio_driver = {
- .probe = hip04_mdio_probe,
- .remove = hip04_mdio_remove,
- .driver = {
- .name = "hip04-mdio",
- .owner = THIS_MODULE,
- .of_match_table = hip04_mdio_match,
- },
-};
-
-module_platform_driver(hip04_mdio_driver);
-
-MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hip04-mdio");
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index a5e077eac99a..e51892d518ff 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -371,7 +371,7 @@ static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
{
- writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+ writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/Makefile b/drivers/net/ethernet/hisilicon/hns/Makefile
new file mode 100644
index 000000000000..6010c83e38d8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HNS) += hnae.o
+
+obj-$(CONFIG_HNS_DSAF) += hns_dsaf.o
+hns_dsaf-objs = hns_ae_adapt.o hns_dsaf_gmac.o hns_dsaf_mac.o hns_dsaf_misc.o \
+ hns_dsaf_main.o hns_dsaf_ppe.o hns_dsaf_rcb.o hns_dsaf_xgmac.o
+
+obj-$(CONFIG_HNS_ENET) += hns_enet_drv.o
+hns_enet_drv-objs = hns_enet.o hns_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
new file mode 100644
index 000000000000..b3645297477e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "hnae.h"
+
+#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
+
+static struct class *hnae_class;
+
+static void
+hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_add_tail_rcu(node, head);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static void hnae_list_del(spinlock_t *lock, struct list_head *node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_del_rcu(node);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ unsigned int order = hnae_page_order(ring);
+ struct page *p = dev_alloc_pages(order);
+
+ if (!p)
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->page_offset = 0;
+ cb->reuse_flag = 0;
+ cb->buf = page_address(p);
+ cb->length = hnae_page_size(ring);
+ cb->type = DESC_TYPE_PAGE;
+
+ return 0;
+}
+
+static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (cb->type == DESC_TYPE_SKB)
+ dev_kfree_skb_any((struct sk_buff *)cb->priv);
+ else if (unlikely(is_rx_ring(ring)))
+ put_page((struct page *)cb->priv);
+ memset(cb, 0, sizeof(*cb));
+}
+
+static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+ cb->length, ring_to_dma_dir(ring));
+
+ if (dma_mapping_error(ring_to_dev(ring), cb->dma))
+ return -EIO;
+
+ return 0;
+}
+
+static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (cb->type == DESC_TYPE_SKB)
+ dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ else
+ dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+}
+
+static struct hnae_buf_ops hnae_bops = {
+ .alloc_buffer = hnae_alloc_buffer,
+ .free_buffer = hnae_free_buffer,
+ .map_buffer = hnae_map_buffer,
+ .unmap_buffer = hnae_unmap_buffer,
+};
+
+static int __ae_match(struct device *dev, const void *data)
+{
+ struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+ const char *ae_id = data;
+
+ if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
+ return 1;
+
+ return 0;
+}
+
+static struct hnae_ae_dev *find_ae(const char *ae_id)
+{
+ struct device *dev;
+
+ WARN_ON(!ae_id);
+
+ dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);
+
+ return dev ? cls_to_ae_dev(dev) : NULL;
+}
+
+static void hnae_free_buffers(struct hnae_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->desc_num; i++)
+ hnae_free_buffer_detach(ring, i);
+}
+
+/* Allocate memory for raw packets, and map them for DMA */
+static int hnae_alloc_buffers(struct hnae_ring *ring)
+{
+ int i, j, ret;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ ret = hnae_alloc_buffer_attach(ring, i);
+ if (ret)
+ goto out_buffer_fail;
+ }
+
+ return 0;
+
+out_buffer_fail:
+ for (j = i - 1; j >= 0; j--)
+ hnae_free_buffer_detach(ring, j);
+ return ret;
+}
+
+/* free desc along with its attached buffer */
+static void hnae_free_desc(struct hnae_ring *ring)
+{
+ hnae_free_buffers(ring);
+ dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ ring_to_dma_dir(ring));
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+/* alloc desc, without buffer attached */
+static int hnae_alloc_desc(struct hnae_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
+ ring->desc, size, ring_to_dma_dir(ring));
+ if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* fini ring, also freeing the buffers attached to it */
+static void hnae_fini_ring(struct hnae_ring *ring)
+{
+ hnae_free_desc(ring);
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+}
+
+/* init ring; for an rx ring, also attach its buffers */
+static int
+hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
+{
+ int ret;
+
+ if (ring->desc_num <= 0 || ring->buf_size <= 0)
+ return -EINVAL;
+
+ ring->q = q;
+ ring->flags = flags;
+ assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
+
+ /* No matter whether it is a tx or rx ring, ntc and ntu start from 0 */
+ assert(ring->next_to_use == 0);
+ assert(ring->next_to_clean == 0);
+
+ ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
+ GFP_KERNEL);
+ if (!ring->desc_cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hnae_alloc_desc(ring);
+ if (ret)
+ goto out_with_desc_cb;
+
+ if (is_rx_ring(ring)) {
+ ret = hnae_alloc_buffers(ring);
+ if (ret)
+ goto out_with_desc;
+ }
+
+ return 0;
+
+out_with_desc:
+ hnae_free_desc(ring);
+out_with_desc_cb:
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+out:
+ return ret;
+}
+
+static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
+ struct hnae_ae_dev *dev)
+{
+ int ret;
+
+ q->dev = dev;
+ q->handle = h;
+
+ ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
+ if (ret)
+ goto out;
+
+ ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
+ if (ret)
+ goto out_with_tx_ring;
+
+ if (dev->ops->init_queue)
+ dev->ops->init_queue(q);
+
+ return 0;
+
+out_with_tx_ring:
+ hnae_fini_ring(&q->tx_ring);
+out:
+ return ret;
+}
+
+static void hnae_fini_queue(struct hnae_queue *q)
+{
+ if (q->dev->ops->fini_queue)
+ q->dev->ops->fini_queue(q);
+
+ hnae_fini_ring(&q->tx_ring);
+ hnae_fini_ring(&q->rx_ring);
+}
+
+/**
+ * ae_chain - notifier chain head for AE registration events
+ */
+static RAW_NOTIFIER_HEAD(ae_chain);
+
+int hnae_register_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&ae_chain, nb);
+}
+EXPORT_SYMBOL(hnae_register_notifier);
+
+void hnae_unregister_notifier(struct notifier_block *nb)
+{
+ if (raw_notifier_chain_unregister(&ae_chain, nb))
+ pr_err("hnae: notifier chain unregister fail\n");
+}
+EXPORT_SYMBOL(hnae_unregister_notifier);
+
+int hnae_reinit_handle(struct hnae_handle *handle)
+{
+ int i, j;
+ int ret;
+
+ for (i = 0; i < handle->q_num; i++) /* free rings */
+ hnae_fini_queue(handle->qs[i]);
+
+ if (handle->dev->ops->reset)
+ handle->dev->ops->reset(handle);
+
+ for (i = 0; i < handle->q_num; i++) { /* reinit rings */
+ ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+ return 0;
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+ return ret;
+}
+EXPORT_SYMBOL(hnae_reinit_handle);
+
+/* hnae_get_handle - get a handle from the AE
+ * @owner_dev: the dev which uses this handle
+ * @ae_id: the id of the ae to be used
+ * @port_id: the port id of the enet port bound to the handle
+ * @bops: the callbacks for buffer management, or NULL for the default ops
+ *
+ * return handle ptr or ERR_PTR
+ */
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+ const char *ae_id, u32 port_id,
+ struct hnae_buf_ops *bops)
+{
+ struct hnae_ae_dev *dev;
+ struct hnae_handle *handle;
+ int i, j;
+ int ret;
+
+ dev = find_ae(ae_id);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ handle = dev->ops->get_handle(dev, port_id);
+ if (IS_ERR(handle))
+ return handle;
+
+ handle->dev = dev;
+ handle->owner_dev = owner_dev;
+ handle->bops = bops ? bops : &hnae_bops;
+ handle->eport_id = port_id;
+
+ for (i = 0; i < handle->q_num; i++) {
+ ret = hnae_init_queue(handle, handle->qs[i], dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+
+ __module_get(dev->owner);
+
+ hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);
+
+ return handle;
+
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(hnae_get_handle);
+
+void hnae_put_handle(struct hnae_handle *h)
+{
+ struct hnae_ae_dev *dev = h->dev;
+ int i;
+
+ for (i = 0; i < h->q_num; i++)
+ hnae_fini_queue(h->qs[i]);
+
+ if (h->dev->ops->reset)
+ h->dev->ops->reset(h);
+
+ hnae_list_del(&dev->lock, &h->node);
+
+ if (dev->ops->put_handle)
+ dev->ops->put_handle(h);
+
+ module_put(dev->owner);
+}
+EXPORT_SYMBOL(hnae_put_handle);
+
+static void hnae_release(struct device *dev)
+{
+}
+
+/**
+ * hnae_ae_register - register an AE engine with the hnae framework
+ * @hdev: the hnae ae engine device
+ * @owner: the module which provides this dev
+ * NOTE: duplicate names are not checked
+ */
+int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
+{
+ static atomic_t id = ATOMIC_INIT(-1);
+ int ret;
+
+ if (!hdev->dev)
+ return -ENODEV;
+
+ if (!hdev->ops || !hdev->ops->get_handle ||
+ !hdev->ops->toggle_ring_irq ||
+ !hdev->ops->toggle_queue_status ||
+ !hdev->ops->get_status || !hdev->ops->adjust_link)
+ return -EINVAL;
+
+ hdev->owner = owner;
+ hdev->id = (int)atomic_inc_return(&id);
+ hdev->cls_dev.parent = hdev->dev;
+ hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.release = hnae_release;
+ (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
+ ret = device_register(&hdev->cls_dev);
+ if (ret)
+ return ret;
+
+ __module_get(THIS_MODULE);
+
+ INIT_LIST_HEAD(&hdev->handle_list);
+ spin_lock_init(&hdev->lock);
+
+ ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
+ if (ret)
+ dev_dbg(hdev->dev,
+ "no notifier registered for AE: %s\n", hdev->name);
+
+ return 0;
+}
+EXPORT_SYMBOL(hnae_ae_register);
+
+/**
+ * hnae_ae_unregister - unregisters a HNAE AE engine
+ * @hdev: the hnae ae engine device to unregister
+ */
+void hnae_ae_unregister(struct hnae_ae_dev *hdev)
+{
+ device_unregister(&hdev->cls_dev);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL(hnae_ae_unregister);
+
+static int __init hnae_init(void)
+{
+ hnae_class = class_create(THIS_MODULE, "hnae");
+ return PTR_ERR_OR_ZERO(hnae_class);
+}
+
+static void __exit hnae_exit(void)
+{
+ class_destroy(hnae_class);
+}
+
+subsys_initcall(hnae_init);
+module_exit(hnae_exit);
+
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");
+
+/* vi: set tw=78 noet: */
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
new file mode 100644
index 000000000000..cec95ac8687d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNAE_H
+#define __HNAE_H
+
+/* Names used in this framework:
+ * ae handle (handle):
+ * a set of queues provided by AE
+ * ring buffer queue (rbq):
+ * the channel between the upper layer and the AE, can do tx and rx
+ * ring:
+ * a tx or rx channel within a rbq
+ * ring description (desc):
+ * an element in the ring with packet information
+ * buffer:
+ * a memory region referred to by a desc, holding the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" means a dynamic
+ * number counted at runtime
+ * "cb" means control block
+ */
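+
+/* an illustrative traversal of these objects (not a code path in this
+ * header): a handle owns queues, a queue owns a tx and an rx ring, and a
+ * ring owns its desc/desc_cb arrays; with hypothetical odev/ae_name:
+ *
+ * struct hnae_handle *h = hnae_get_handle(odev, ae_name, 0, NULL);
+ * struct hnae_ring *rx = &h->qs[0]->rx_ring;
+ * struct hnae_desc *d = &rx->desc[rx->next_to_clean];
+ */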
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/phy.h>
+#include <linux/types.h>
+
+#define HNAE_DRIVER_VERSION "1.3.0"
+#define HNAE_DRIVER_NAME "hns"
+#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
+#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"
+
+#ifdef DEBUG
+
+#ifndef assert
+#define assert(expr) \
+do { \
+ if (!(expr)) { \
+ pr_err("Assertion failed! %s, %s, %s, line %d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ } \
+} while (0)
+#endif
+
+#else
+
+#ifndef assert
+#define assert(expr)
+#endif
+
+#endif
+
+#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
+#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
+#define AE_NAME_SIZE 16
+
+/* the RX and TX RCB register layouts may diverge in the future, but they
+ * are identical for now
+ */
+#define RCB_REG_BASEADDR_L 0x00 /* P660 supports only 32-bit accesses */
+#define RCB_REG_BASEADDR_H 0x04
+#define RCB_REG_BD_NUM 0x08
+#define RCB_REG_BD_LEN 0x0C
+#define RCB_REG_PKTLINE 0x10
+#define RCB_REG_TAIL 0x18
+#define RCB_REG_HEAD 0x1C
+#define RCB_REG_FBDNUM 0x20
+#define RCB_REG_OFFSET 0x24 /* pkt num to be handled */
+#define RCB_REG_PKTNUM_RECORD 0x2C /* total pkt received */
+
+#define HNS_RX_HEAD_SIZE 256
+
+#define HNAE_AE_REGISTER 0x1
+
+#define RCB_RING_NAME_LEN 16
+
+enum hnae_led_state {
+ HNAE_LED_INACTIVE,
+ HNAE_LED_ACTIVE,
+ HNAE_LED_ON,
+ HNAE_LED_OFF
+};
+
+#define HNS_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS_RX_FLAG_L3ID_IPV4 0x0
+#define HNS_RX_FLAG_L3ID_IPV6 0x1
+#define HNS_RX_FLAG_L4ID_UDP 0x0
+#define HNS_RX_FLAG_L4ID_TCP 0x1
+
+#define HNS_TXD_ASID_S 0
+#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
+#define HNS_TXD_BUFNUM_S 8
+#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
+#define HNS_TXD_PORTID_S 10
+#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)
+
+#define HNS_TXD_RA_B 8
+#define HNS_TXD_RI_B 9
+#define HNS_TXD_L4CS_B 10
+#define HNS_TXD_L3CS_B 11
+#define HNS_TXD_FE_B 12
+#define HNS_TXD_VLD_B 13
+#define HNS_TXD_IPOFFSET_S 14
+#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
+
+#define HNS_RXD_IPOFFSET_S 0
+#define HNS_RXD_IPOFFSET_M (0xff << HNS_RXD_IPOFFSET_S)
+#define HNS_RXD_BUFNUM_S 8
+#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
+#define HNS_RXD_PORTID_S 10
+#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
+#define HNS_RXD_DMAC_S 13
+#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
+#define HNS_RXD_VLAN_S 15
+#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
+#define HNS_RXD_L3ID_S 17
+#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
+#define HNS_RXD_L4ID_S 21
+#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
+#define HNS_RXD_FE_B 25
+#define HNS_RXD_FRAG_B 26
+#define HNS_RXD_VLD_B 27
+#define HNS_RXD_L2E_B 28
+#define HNS_RXD_L3E_B 29
+#define HNS_RXD_L4E_B 30
+#define HNS_RXD_DROP_B 31
+
+#define HNS_RXD_VLANID_S 8
+#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
+#define HNS_RXD_CFI_B 20
+#define HNS_RXD_PRI_S 21
+#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
+#define HNS_RXD_ASID_S 24
+#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)
+
+/* hardware spec ring buffer format */
+struct __packed hnae_desc {
+ __le64 addr;
+ union {
+ struct {
+ __le16 asid_bufnum_pid;
+ __le16 send_size;
+ __le32 flag_ipoffset;
+ __le32 reserved_3[4];
+ } tx;
+
+ struct {
+ __le32 ipoff_bnum_pid_flag;
+ __le16 pkt_len;
+ __le16 size;
+ __le32 vlan_pri_asid;
+ __le32 reserved_2[3];
+ } rx;
+ };
+};
+
+struct hnae_desc_cb {
+ dma_addr_t dma; /* dma address of this desc */
+ void *buf; /* cpu address of the buffer */
+
+ /* priv data for the desc, e.g. the skb when used with the ip stack */
+ void *priv;
+ u16 page_offset;
+ u16 reuse_flag;
+
+ u16 length; /* length of the buffer */
+
+ /* desc type, used by the ring user to mark the type of the priv data */
+ u16 type;
+};
+
+#define setflags(flags, bits) ((flags) |= (bits))
+#define unsetflags(flags, bits) ((flags) &= ~(bits))
+
+/* hnae_ring->flags fields */
+#define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
+#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
+#define is_rx_ring(ring) (!is_tx_ring(ring))
+#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+struct ring_stats {
+ u64 io_err_cnt;
+ u64 sw_err_cnt;
+ u64 seg_pkt_cnt;
+ union {
+ struct {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_err_cnt;
+ u64 restart_queue;
+ u64 tx_busy;
+ };
+ struct {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_err_cnt;
+ u64 reuse_pg_cnt;
+ u64 err_pkt_len;
+ u64 non_vld_descs;
+ u64 err_bd_num;
+ u64 l2_err;
+ u64 l3l4_csum_err;
+ };
+ };
+};
+
+struct hnae_queue;
+
+struct hnae_ring {
+ u8 __iomem *io_base; /* base io address for the ring */
+ struct hnae_desc *desc; /* dma map address space */
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_queue *q;
+ int irq;
+ char ring_name[RCB_RING_NAME_LEN];
+
+ /* statistic */
+ struct ring_stats stats;
+
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+ u16 max_desc_num_per_pkt;
+ u16 max_raw_data_sz_per_desc;
+ u16 max_pkt_size;
+ int next_to_use; /* idx of next spare desc */
+
+ /* idx of the latest processed desc; the ring is empty when it
+ * equals next_to_use
+ */
+ int next_to_clean;
+
+ int flags; /* ring attribute */
+ int irq_init_flag;
+};
+
+#define ring_ptr_move_fw(ring, p) \
+ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
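+
+/* e.g. with desc_num = 8 and next_to_use = 7, ring_ptr_move_fw(ring,
+ * next_to_use) wraps next_to_use to 0; ring_ptr_move_bw() moves 0 back to 7
+ */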
+
+enum hns_desc_type {
+ DESC_TYPE_SKB,
+ DESC_TYPE_PAGE,
+};
+
+#define assert_is_ring_idx(ring, idx) \
+ assert((idx) >= 0 && (idx) < (ring)->desc_num)
+
+/* the distance between [begin, end) in a ring buffer
+ * note: one slot is always left unused between begin and end
+ */
+static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
+{
+ assert_is_ring_idx(ring, begin);
+ assert_is_ring_idx(ring, end);
+
+ return (end - begin + ring->desc_num) % ring->desc_num;
+}
+
+static inline int ring_space(struct hnae_ring *ring)
+{
+ return ring->desc_num -
+ ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+}
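+
+/* worked example: desc_num = 8, next_to_clean = 6, next_to_use = 2 gives
+ * ring_dist() = (2 - 6 + 8) % 8 = 4 descs in flight, so
+ * ring_space() = 8 - 4 - 1 = 3 free slots (the one slot kept unused lets a
+ * full ring be distinguished from an empty one)
+ */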
+
+static inline int is_ring_empty(struct hnae_ring *ring)
+{
+ assert_is_ring_idx(ring, ring->next_to_use);
+ assert_is_ring_idx(ring, ring->next_to_clean);
+
+ return ring->next_to_use == ring->next_to_clean;
+}
+
+#define hnae_buf_size(_ring) ((_ring)->buf_size)
+#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
+#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+
+struct hnae_handle;
+
+/* callbacks to allocate, free and dma-map the buffers referred to by descs */
+struct hnae_buf_ops {
+ int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+};
+
+struct hnae_queue {
+ void __iomem *io_base;
+ phys_addr_t phy_base;
+ struct hnae_ae_dev *dev; /* the device that uses this queue */
+ struct hnae_ring rx_ring, tx_ring;
+ struct hnae_handle *handle;
+};
+
+/*hnae loop mode*/
+enum hnae_loop {
+ MAC_INTERNALLOOP_MAC = 0,
+ MAC_INTERNALLOOP_SERDES,
+ MAC_INTERNALLOOP_PHY,
+ MAC_LOOP_NONE,
+};
+
+/*hnae port type*/
+enum hnae_port_type {
+ HNAE_PORT_SERVICE = 0,
+ HNAE_PORT_DEBUG
+};
+
+/* This struct defines the operation on the handle.
+ *
+ * get_handle(): (mandatory)
+ * Get a handle from AE according to its name and options.
+ * the AE driver should manage the space used by handle and its queues while
+ * the HNAE framework will allocate desc and desc_cb for all rings in the
+ * queues.
+ * put_handle():
+ * Release the handle.
+ * start():
+ * Enable the hardware, include all queues
+ * stop():
+ * Disable the hardware
+ * set_opts(): (mandatory)
+ * Set options to the AE
+ * get_opts(): (mandatory)
+ * Get options from the AE
+ * get_status():
+ * Get the carrier state of the back channel of the handle, 1 for ok, 0 for
+ * non-ok
+ * toggle_ring_irq(): (mandatory)
+ * Set the ring irq to be enabled (0) or disabled (1)
+ * toggle_queue_status(): (mandatory)
+ * Set the queue to be enabled (1) or disabled (0); this does not change
+ * the ring irq state
+ * adjust_link()
+ * adjust link status
+ * set_loopback()
+ * set loopback
+ * get_ring_bdnum_limit()
+ * get ring bd number limit
+ * get_pauseparam()
+ * get the tx and rx pause frame configuration
+ * set_autoneg()
+ * set pause frame autonegotiation
+ * get_autoneg()
+ * get pause frame autonegotiation
+ * set_pauseparam()
+ * set the tx and rx pause frame configuration
+ * get_coalesce_usecs()
+ * get usecs to delay a TX interrupt after a packet is sent
+ * get_rx_max_coalesced_frames()
+ * get the maximum number of coalesced frames handled before an interrupt
+ * set_coalesce_usecs()
+ * set usecs to delay a TX interrupt after a packet is sent
+ * set_coalesce_frames()
+ * set the maximum number of coalesced frames handled before an interrupt
+ * get_ringnum()
+ * get RX/TX ring number
+ * get_max_ringnum()
+ * get RX/TX ring maximum number
+ * get_mac_addr()
+ * get mac address
+ * set_mac_addr()
+ * set mac address
+ * set_mc_addr()
+ * set multicast mode
+ * set_mtu()
+ * set mtu
+ * update_stats()
+ * update the legacy net_device statistics
+ * get_ethtool_stats()
+ * get ethtool network device statistics
+ * get_strings()
+ * get a set of strings that describe the requested objects
+ * get_sset_count()
+ * get number of strings that @get_strings will write
+ * update_led_status()
+ * update the led status
+ * set_led_id()
+ * set led id
+ * get_regs()
+ * get regs dump
+ * get_regs_len()
+ * get the len of the regs dump
+ */
+struct hnae_ae_ops {
+ struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
+ u32 port_id);
+ void (*put_handle)(struct hnae_handle *handle);
+ void (*init_queue)(struct hnae_queue *q);
+ void (*fini_queue)(struct hnae_queue *q);
+ int (*start)(struct hnae_handle *handle);
+ void (*stop)(struct hnae_handle *handle);
+ void (*reset)(struct hnae_handle *handle);
+ int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
+ int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
+ int (*get_status)(struct hnae_handle *handle);
+ int (*get_info)(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+ void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
+ void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
+ void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+ int (*set_loopback)(struct hnae_handle *handle,
+ enum hnae_loop loop_mode, int en);
+ void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
+ u32 *uplimit);
+ void (*get_pauseparam)(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+ int (*set_autoneg)(struct hnae_handle *handle, u8 enable);
+ int (*get_autoneg)(struct hnae_handle *handle);
+ int (*set_pauseparam)(struct hnae_handle *handle,
+ u32 auto_neg, u32 rx_en, u32 tx_en);
+ void (*get_coalesce_usecs)(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs);
+ void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
+ void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+ int (*set_coalesce_frames)(struct hnae_handle *handle,
+ u32 coalesce_frames);
+ void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
+ int (*get_mac_addr)(struct hnae_handle *handle, void **p);
+ int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
+ int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
+ void (*update_stats)(struct hnae_handle *handle,
+ struct net_device_stats *net_stats);
+ void (*get_stats)(struct hnae_handle *handle, u64 *data);
+ void (*get_strings)(struct hnae_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae_handle *handle, int stringset);
+ void (*update_led_status)(struct hnae_handle *handle);
+ int (*set_led_id)(struct hnae_handle *handle,
+ enum hnae_led_state status);
+ void (*get_regs)(struct hnae_handle *handle, void *data);
+ int (*get_regs_len)(struct hnae_handle *handle);
+};
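+
+/* per hnae_ae_register() in hnae.c, registration is refused unless
+ * get_handle, toggle_ring_irq, toggle_queue_status, get_status and
+ * adjust_link are all provided; the remaining callbacks may be left NULL
+ */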
+
+struct hnae_ae_dev {
+ struct device cls_dev; /* the class dev */
+ struct device *dev; /* the presented dev */
+ struct hnae_ae_ops *ops;
+ struct list_head node;
+ struct module *owner; /* the module that provides this dev */
+ int id;
+ char name[AE_NAME_SIZE];
+ struct list_head handle_list;
+ spinlock_t lock; /* lock to protect the handle_list */
+};
+
+struct hnae_handle {
+ struct device *owner_dev; /* the device that makes use of this handle */
+ struct hnae_ae_dev *dev; /* the device that provides this handle */
+ struct device_node *phy_node;
+ phy_interface_t phy_if;
+ u32 if_support;
+ int q_num;
+ int vf_id;
+ u32 eport_id;
+ enum hnae_port_type port_type;
+ struct list_head node; /* list to hnae_ae_dev->handle_list */
+ struct hnae_buf_ops *bops; /* operation for the buffer */
+ struct hnae_queue **qs; /* array base of all queues */
+};
+
+#define ring_to_dev(ring) ((ring)->q->dev->dev)
+
+struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id,
+ u32 port_id, struct hnae_buf_ops *bops);
+void hnae_put_handle(struct hnae_handle *handle);
+int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
+void hnae_ae_unregister(struct hnae_ae_dev *dev);
+
+int hnae_register_notifier(struct notifier_block *nb);
+void hnae_unregister_notifier(struct notifier_block *nb);
+int hnae_reinit_handle(struct hnae_handle *handle);
+
+#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
+ (q)->tx_ring.io_base + RCB_REG_TAIL)
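+
+/* e.g. after filling the descs for a packet that spans buf_num buffers, a
+ * driver kicks the hardware with hnae_queue_xmit(ring->q, buf_num), which
+ * writes the buffer count to the tx ring tail register
+ */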
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
+ struct hnae_desc_cb *cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ int ret;
+
+ ret = bops->alloc_buffer(ring, cb);
+ if (ret)
+ goto out;
+
+ ret = bops->map_buffer(ring, cb);
+ if (ret)
+ goto out_with_buf;
+
+ return 0;
+
+out_with_buf:
+ bops->free_buffer(ring, cb);
+out:
+ return ret;
+}
+
+static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
+{
+ int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+ if (ret)
+ return ret;
+
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+
+ return 0;
+}
+
+static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
+{
+ ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc[i].addr = 0;
+}
+
+static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ struct hnae_desc_cb *cb = &ring->desc_cb[i];
+
+ if (!ring->desc_cb[i].dma)
+ return;
+
+ hnae_buffer_detach(ring, i);
+ bops->free_buffer(ring, cb);
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
+ struct hnae_desc_cb *res_cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ struct hnae_desc_cb tmp_cb = ring->desc_cb[i];
+
+ bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc_cb[i] = *res_cb;
+ *res_cb = tmp_cb;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
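+/* re-arm the desc at index i to reuse its current buffer: the hw address is
+ * repointed at dma + page_offset (page_offset is maintained by the ring
+ * user) and the rx flag word is cleared
+ */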
+static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
+{
+ ring->desc_cb[i].reuse_flag = 0;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ + ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+#define hnae_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= ((val) << (shift)) & (mask); \
+ } while (0)
+
+#define hnae_set_bit(origin, shift, val) \
+ hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
+
+#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae_get_bit(origin, shift) \
+ hnae_get_field((origin), (0x1 << (shift)), (shift))
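+
+/* illustrative use of the field helpers with the desc bit definitions
+ * above, e.g. encoding and decoding the tx buffer number:
+ *
+ * hnae_set_field(flags, HNS_TXD_BUFNUM_M, HNS_TXD_BUFNUM_S, 2);
+ * bufnum = hnae_get_field(flags, HNS_TXD_BUFNUM_M, HNS_TXD_BUFNUM_S);
+ */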
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
new file mode 100644
index 000000000000..1a16c0307b47
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -0,0 +1,783 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "hnae.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define AE_NAME_PORT_ID_IDX 6
+#define ETH_STATIC_REG 1
+#define ETH_DUMP_REG 5
+#define ETH_GSTRING_LEN 32
+
+static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ return vf_cb->mac_cb;
+}
+
+/**
+ * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
+ * @port_id: enet port id (debug ports 0-1, service ports 2-7;
+ * only 2 in dsaf mode)
+ * return: dsaf port id (service ports 0-5, debug ports 6-7)
+ **/
+static int hns_ae_map_eport_to_dport(u32 port_id)
+{
+ int port_index;
+
+ if (port_id < DSAF_DEBUG_NW_NUM)
+ port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
+ else
+ port_index = port_id - DSAF_DEBUG_NW_NUM;
+
+ return port_index;
+}
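+
+/* worked example, assuming DSAF_DEBUG_NW_NUM == 2 and
+ * DSAF_SERVICE_PORT_NUM_PER_DSAF == 6: enet debug ports 0-1 map to dsaf
+ * ports 6-7, and enet service ports 2-7 map to dsaf ports 0-5
+ */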
+
+static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
+{
+ return container_of(dev, struct dsaf_device, ae_dev);
+}
+
+static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
+{
+ int ppe_index;
+ int ppe_common_index;
+ struct ppe_common_cb *ppe_comm;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
+ ppe_index = vf_cb->port_index;
+ ppe_common_index = 0;
+ } else {
+ ppe_index = 0;
+ ppe_common_index =
+ vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+ }
+ ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+ return &ppe_comm->ppe_cb[ppe_index];
+}
+
+static int hns_ae_get_q_num_per_vf(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+ return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+}
+
+static int hns_ae_get_vf_num_per_port(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+ return dsaf_dev->rcb_common[common_idx]->max_vfn;
+}
+
+static struct ring_pair_cb *hns_ae_get_base_ring_pair(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+ int q_num = rcb_comm->max_q_per_vf;
+ int vf_num = rcb_comm->max_vfn;
+
+ if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
+ else
+ return &rcb_comm->ring_pair_cb[0];
+}
+
+static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
+{
+ return container_of(q, struct ring_pair_cb, q);
+}
+
+static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
+{
+ int port_idx;
+ int vfnum_per_port;
+ int qnum_per_vf;
+ int i;
+ struct dsaf_device *dsaf_dev;
+ struct hnae_handle *ae_handle;
+ struct ring_pair_cb *ring_pair_cb;
+ struct hnae_vf_cb *vf_cb;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(dev);
+ port_idx = hns_ae_map_eport_to_dport(port_id);
+
+ ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
+ vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
+ qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+
+ vf_cb = kzalloc(sizeof(*vf_cb) +
+ qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+ if (unlikely(!vf_cb)) {
+ dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
+ ae_handle = ERR_PTR(-ENOMEM);
+ goto handle_err;
+ }
+ ae_handle = &vf_cb->ae_handle;
+ /* ae_handle Init */
+ ae_handle->owner_dev = dsaf_dev->dev;
+ ae_handle->dev = dev;
+ ae_handle->q_num = qnum_per_vf;
+
+ /* find ring pair, and set vf id*/
+ for (ae_handle->vf_id = 0;
+ ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
+ if (!ring_pair_cb->used_by_vf)
+ break;
+ ring_pair_cb += qnum_per_vf;
+ }
+ if (ae_handle->vf_id >= vfnum_per_port) {
+ dev_err(dsaf_dev->dev, "no free vf queues, get handle fail!\n");
+ ae_handle = ERR_PTR(-EINVAL);
+ goto vf_id_err;
+ }
+
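+ /* handle->qs is the last member of the handle, which is itself the
+ * last member of vf_cb; &qs + 1 is therefore the start of the queue
+ * pointer array allocated at the tail of vf_cb above
+ */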
+ ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
+ for (i = 0; i < qnum_per_vf; i++) {
+ ae_handle->qs[i] = &ring_pair_cb->q;
+ ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
+ ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
+
+ ring_pair_cb->used_by_vf = 1;
+ if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+ ring_pair_cb->port_id_in_dsa = port_idx;
+ else
+ ring_pair_cb->port_id_in_dsa = 0;
+
+ ring_pair_cb++;
+ }
+
+ vf_cb->dsaf_dev = dsaf_dev;
+ vf_cb->port_index = port_idx;
+ vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+
+ ae_handle->phy_if = vf_cb->mac_cb->phy_if;
+ ae_handle->phy_node = vf_cb->mac_cb->phy_node;
+ ae_handle->if_support = vf_cb->mac_cb->if_support;
+ ae_handle->port_type = vf_cb->mac_cb->mac_type;
+
+ return ae_handle;
+vf_id_err:
+ kfree(vf_cb);
+handle_err:
+ return ae_handle;
+}
+
+static void hns_ae_put_handle(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ int i;
+
+ /* release the ring pairs before freeing vf_cb: the handle and its
+ * qs array live inside the vf_cb allocation, so they must not be
+ * touched after kfree()
+ */
+ for (i = 0; i < handle->q_num; i++)
+ hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+ vf_cb->mac_cb = NULL;
+
+ kfree(vf_cb);
+}
+
+static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
+{
+ int q_num = handle->q_num;
+ int i;
+
+ for (i = 0; i < q_num; i++)
+ hns_rcb_ring_enable_hw(handle->qs[i], val);
+}
+
+static void hns_ae_init_queue(struct hnae_queue *q)
+{
+ struct ring_pair_cb *ring =
+ container_of(q, struct ring_pair_cb, q);
+
+ hns_rcb_init_hw(ring);
+}
+
+static void hns_ae_fini_queue(struct hnae_queue *q)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_rcb_reset_ring_hw(q);
+}
+
+static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+{
+ int ret;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (!p || !is_valid_ether_addr((const u8 *)p)) {
+ dev_err(handle->owner_dev, "is not valid ether addr !\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
+ if (ret != 0) {
+ dev_err(handle->owner_dev,
+ "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
+{
+ int ret;
+ char *mac_addr = (char *)addr;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ assert(mac_cb);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return 0;
+
+ ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE);
+ if (ret) {
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, mac_cb->mac_id, ret);
+ return ret;
+ }
+
+ ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
+ mac_addr, ENABLE);
+ if (ret)
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
+
+ return ret;
+}
+
+static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_set_mtu(mac_cb, new_mtu);
+}
+
+static int hns_ae_start(struct hnae_handle *handle)
+{
+ int ret;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE);
+ if (ret)
+ return ret;
+
+ hns_ae_ring_enable_all(handle, 1);
+ msleep(100);
+
+ hns_mac_start(mac_cb);
+
+ return 0;
+}
+
+static void hns_ae_stop(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ /* only the tx fbd needs to be cleaned, not the rx fbd */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
+
+ msleep(20);
+
+ hns_mac_stop(mac_cb);
+
+ usleep_range(10000, 20000);
+
+ hns_ae_ring_enable_all(handle, 0);
+
+ (void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE);
+}
+
+static void hns_ae_reset(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ u8 ppe_common_index =
+ vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+
+ hns_mac_reset(vf_cb->mac_cb);
+ hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+ }
+}
+
+static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+ u32 flag;
+
+ if (is_tx_ring(ring))
+ flag = RCB_INT_FLAG_TX;
+ else
+ flag = RCB_INT_FLAG_RX;
+
+ hns_rcb_int_clr_hw(ring->q, flag);
+ hns_rcb_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
+{
+ hns_rcb_start(queue, val);
+}
+
+static int hns_ae_get_link_status(struct hnae_handle *handle)
+{
+ u32 link_status;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_mac_get_link_status(mac_cb, &link_status);
+
+ return !!link_status;
+}
+
+static int hns_ae_get_mac_info(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
+}
+
+static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
+ int duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+}
+
+static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
+ u32 *uplimit)
+{
+ *uplimit = HNS_RCB_RING_MAX_PENDING_BD;
+}
+
+static void hns_ae_get_pauseparam(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en)
+{
+ assert(handle);
+
+ hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+
+ hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+}
+
+static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
+{
+ assert(handle);
+
+ return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
+}
+
+static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
+{
+ hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+}
+
+static int hns_ae_get_autoneg(struct hnae_handle *handle)
+{
+ u32 auto_neg;
+
+ assert(handle);
+
+ hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
+
+ return auto_neg;
+}
+
+static int hns_ae_set_pauseparam(struct hnae_handle *handle,
+ u32 autoneg, u32 rx_en, u32 tx_en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ int ret;
+
+ ret = hns_mac_set_autoneg(mac_cb, autoneg);
+ if (ret)
+ return ret;
+
+ return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
+}
+
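+/* note: both values below are read via the same rcb common index, so tx
+ * and rx report one shared coalesce timer value
+ */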
+static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs)
+{
+ int port;
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ *tx_usecs = hns_rcb_get_coalesce_usecs(
+ hns_ae_get_dsaf_dev(handle->dev),
+ hns_dsaf_get_comm_idx_by_port(port));
+ *rx_usecs = hns_rcb_get_coalesce_usecs(
+ hns_ae_get_dsaf_dev(handle->dev),
+ hns_dsaf_get_comm_idx_by_port(port));
+}
+
+static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames)
+{
+ int port;
+
+ assert(handle);
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ *tx_frames = hns_rcb_get_coalesced_frames(
+ hns_ae_get_dsaf_dev(handle->dev), port);
+ *rx_frames = hns_rcb_get_coalesced_frames(
+ hns_ae_get_dsaf_dev(handle->dev), port);
+}
+
+static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+ u32 timeout)
+{
+ int port;
+
+ assert(handle);
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
+ port, timeout);
+}
+
+static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+ u32 coalesce_frames)
+{
+ int port;
+ int ret;
+
+ assert(handle);
+
+ port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+ ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
+ port, coalesce_frames);
+ return ret;
+}
+
+static void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ int port;
+ int idx;
+ struct dsaf_device *dsaf_dev;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ struct hnae_queue *queue;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
+ u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
+ u64 rx_missed_errors = 0;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ if (!dsaf_dev)
+ return;
+ port = vf_cb->port_index;
+ ppe_cb = hns_get_ppe_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ queue = handle->qs[idx];
+ hns_rcb_update_stats(queue);
+
+ tx_bytes += queue->tx_ring.stats.tx_bytes;
+ tx_packets += queue->tx_ring.stats.tx_pkts;
+ rx_bytes += queue->rx_ring.stats.rx_bytes;
+ rx_packets += queue->rx_ring.stats.rx_pkts;
+
+ rx_errors += queue->rx_ring.stats.err_pkt_len
+ + queue->rx_ring.stats.l2_err
+ + queue->rx_ring.stats.l3l4_csum_err;
+ }
+
+ hns_ppe_update_stats(ppe_cb);
+ rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
+ tx_errors += ppe_cb->hw_stats.tx_err_checksum
+ + ppe_cb->hw_stats.tx_err_fifo_empty;
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+ hns_dsaf_update_stats(dsaf_dev, port);
+ /* for port upline direction, i.e., rx. */
+ rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;
+
+ /* for port downline direction, i.e., tx. */
+ port = port + DSAF_PPE_INODE_BASE;
+ hns_dsaf_update_stats(dsaf_dev, port);
+ tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].crc_false;
+ tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
+ }
+
+ hns_mac_update_stats(mac_cb);
+ rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;
+
+ tx_errors += mac_cb->hw_stats.tx_bad_pkts
+ + mac_cb->hw_stats.tx_fragment_err
+ + mac_cb->hw_stats.tx_jabber_err
+ + mac_cb->hw_stats.tx_underrun_err
+ + mac_cb->hw_stats.tx_crc_err;
+
+ net_stats->tx_bytes = tx_bytes;
+ net_stats->tx_packets = tx_packets;
+ net_stats->rx_bytes = rx_bytes;
+ net_stats->rx_dropped = 0;
+ net_stats->rx_packets = rx_packets;
+ net_stats->rx_errors = rx_errors;
+ net_stats->tx_errors = tx_errors;
+ net_stats->tx_dropped = tx_dropped;
+ net_stats->rx_missed_errors = rx_missed_errors;
+ net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
+ net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
+ net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
+ net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
+ net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
+}
+
+static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+{
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ u64 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ if (!handle || !data) {
+ pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
+ return;
+ }
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_stats(handle->qs[idx], p);
+ p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
+ }
+
+ hns_ppe_get_stats(ppe_cb, p);
+ p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
+
+ hns_mac_get_stats(mac_cb, p);
+ p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
+}
+
+static void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
+{
+ int port;
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ u8 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ assert(handle);
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ port = vf_cb->port_index;
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_strings(stringset, p, idx);
+ p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
+ }
+
+ hns_ppe_get_strings(ppe_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+
+ hns_mac_get_strings(mac_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_strings(stringset, p, port);
+}
+
+static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+{
+ u32 sset_count = 0;
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
+ sset_count += hns_ppe_get_sset_count(stringset);
+ sset_count += hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ sset_count += hns_dsaf_get_sset_count(stringset);
+
+ return sset_count;
+}
+
+static int hns_ae_config_loopback(struct hnae_handle *handle,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ switch (loop) {
+ case MAC_INTERNALLOOP_SERDES:
+ ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static void hns_ae_update_led_status(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ if (!mac_cb->cpld_vaddr)
+ return;
+ hns_set_led_opt(mac_cb);
+}
+
+static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ return hns_cpld_led_set_id(mac_cb, status);
+}
+
+static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+{
+ u32 *p = data;
+ u32 rcb_com_idx;
+ int i;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ hns_ppe_get_regs(ppe_cb, p);
+ p += hns_ppe_get_regs_count();
+
+ rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
+ hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+ p += hns_rcb_get_common_regs_count();
+
+ for (i = 0; i < handle->q_num; i++) {
+ hns_rcb_get_ring_regs(handle->qs[i], p);
+ p += hns_rcb_get_ring_regs_count();
+ }
+
+ hns_mac_get_regs(vf_cb->mac_cb, p);
+ p += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
+}
+
+static int hns_ae_get_regs_len(struct hnae_handle *handle)
+{
+ u32 total_num;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ total_num = hns_ppe_get_regs_count();
+ total_num += hns_rcb_get_common_regs_count();
+ total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
+ total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ total_num += hns_dsaf_get_regs_count();
+
+ return total_num;
+}
+
+static struct hnae_ae_ops hns_dsaf_ops = {
+ .get_handle = hns_ae_get_handle,
+ .put_handle = hns_ae_put_handle,
+ .init_queue = hns_ae_init_queue,
+ .fini_queue = hns_ae_fini_queue,
+ .start = hns_ae_start,
+ .stop = hns_ae_stop,
+ .reset = hns_ae_reset,
+ .toggle_ring_irq = hns_ae_toggle_ring_irq,
+ .toggle_queue_status = hns_ae_toggle_queue_status,
+ .get_status = hns_ae_get_link_status,
+ .get_info = hns_ae_get_mac_info,
+ .adjust_link = hns_ae_adjust_link,
+ .set_loopback = hns_ae_config_loopback,
+ .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
+ .get_pauseparam = hns_ae_get_pauseparam,
+ .set_autoneg = hns_ae_set_autoneg,
+ .get_autoneg = hns_ae_get_autoneg,
+ .set_pauseparam = hns_ae_set_pauseparam,
+ .get_coalesce_usecs = hns_ae_get_coalesce_usecs,
+ .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
+ .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
+ .set_coalesce_frames = hns_ae_set_coalesce_frames,
+ .set_promisc_mode = hns_ae_set_promisc_mode,
+ .set_mac_addr = hns_ae_set_mac_address,
+ .set_mc_addr = hns_ae_set_multicast_one,
+ .set_mtu = hns_ae_set_mtu,
+ .update_stats = hns_ae_update_stats,
+ .get_stats = hns_ae_get_stats,
+ .get_strings = hns_ae_get_strings,
+ .get_sset_count = hns_ae_get_sset_count,
+ .update_led_status = hns_ae_update_led_status,
+ .set_led_id = hns_ae_cpld_set_led_id,
+ .get_regs = hns_ae_get_regs,
+ .get_regs_len = hns_ae_get_regs_len
+};
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
+{
+ struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+
+ ae_dev->ops = &hns_dsaf_ops;
+ ae_dev->dev = dsaf_dev->dev;
+
+ return hnae_ae_register(ae_dev, THIS_MODULE);
+}
+
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
+{
+ hnae_ae_unregister(&dsaf_dev->ae_dev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
new file mode 100644
index 000000000000..b8517b00e706
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_gmac.h"
+
+static const struct mac_stats_string g_gmac_stats_string[] = {
+ {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
+ {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+ {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"gmac_rx_pkts_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"gmac_rx_pkts_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"gmac_rx_pkts_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"gmac_rx_pkts_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"gmac_rx_pkts_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"gmac_rx_fcs_errors", MAC_STATS_FIELD_OFF(rx_fcs_err)},
+ {"gmac_rx_tagged", MAC_STATS_FIELD_OFF(rx_vlan_pkts)},
+ {"gmac_rx_data_err", MAC_STATS_FIELD_OFF(rx_data_err)},
+ {"gmac_rx_align_errors", MAC_STATS_FIELD_OFF(rx_align_err)},
+ {"gmac_rx_long_errors", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"gmac_rx_jabber_errors", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"gmac_rx_pause_maccontrol", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"gmac_rx_unknown_maccontrol", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"gmac_rx_very_long_err", MAC_STATS_FIELD_OFF(rx_long_err)},
+ {"gmac_rx_runt_err", MAC_STATS_FIELD_OFF(rx_minto64)},
+ {"gmac_rx_short_err", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
+ {"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+ {"gmac_rx_overrun_cnt", MAC_STATS_FIELD_OFF(rx_fifo_overrun_err)},
+ {"gmac_rx_length_err", MAC_STATS_FIELD_OFF(rx_len_err)},
+ {"gmac_rx_fail_comma", MAC_STATS_FIELD_OFF(rx_comma_err)},
+
+ {"gmac_tx_octets_ok", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"gmac_tx_octets_bad", MAC_STATS_FIELD_OFF(tx_bad_bytes)},
+ {"gmac_tx_uc_pkts", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"gmac_tx_mc_pkts", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"gmac_tx_bc_pkts", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"gmac_tx_pkts_64octets", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"gmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"gmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"gmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"gmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"gmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"gmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"gmac_tx_excessive_length_drop", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"gmac_tx_underrun", MAC_STATS_FIELD_OFF(tx_underrun_err)},
+ {"gmac_tx_tagged", MAC_STATS_FIELD_OFF(tx_vlan)},
+ {"gmac_tx_crc_error", MAC_STATS_FIELD_OFF(tx_crc_err)},
+ {"gmac_tx_pause_frames", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}
+};
+
+static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* enable GE rx/tx */
+ if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
+
+ if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+}
+
+static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* disable GE rx/tx */
+ if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
+
+ if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+}
+
+/**
+ * hns_gmac_get_en - get port enable state
+ * @mac_drv: mac device
+ * @rx: rx enable
+ * @tx: tx enable
+ */
+static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 porten;
+
+ porten = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ *tx = dsaf_get_bit(porten, GMAC_PORT_TX_EN_B);
+ *rx = dsaf_get_bit(porten, GMAC_PORT_RX_EN_B);
+}
+
+static void hns_gmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_FC_TX_TIMER_REG, GMAC_FC_TX_TIMER_M,
+ GMAC_FC_TX_TIMER_S, newval);
+}
+
+static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *newval = dsaf_get_dev_field(drv, GMAC_FC_TX_TIMER_REG,
+ GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S);
+}
+
+static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG,
+ GMAC_PAUSE_EN_RX_FDFC_B, !!newval);
+}
+
+static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_MAX_FRM_SIZE_REG, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+
+ dsaf_set_dev_field(drv, GAMC_RX_MAX_FRAME, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+}
+
+static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B, !!newval);
+}
+
+static void hns_gmac_tx_loop_pkt_dis(void *mac_drv)
+{
+ u32 tx_loop_pkt_pri;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_loop_pkt_pri = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_EN_B, 1);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_HIG_PRI_B, 0);
+ dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri);
+}
+
+static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+ GMAC_DUPLEX_TYPE_B, !!newval);
+}
+
+static void hns_gmac_get_duplex_type(void *mac_drv,
+ enum hns_gmac_duplex_mdoe *duplex_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *duplex_mode = (enum hns_gmac_duplex_mdoe)dsaf_get_dev_bit(
+ drv, GMAC_DUPLEX_TYPE_REG, GMAC_DUPLEX_TYPE_B);
+}
+
+static void hns_gmac_get_port_mode(void *mac_drv, enum hns_port_mode *port_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+}
+
+static void hns_gmac_port_mode_get(void *mac_drv,
+ struct hns_gmac_port_mode_cfg *port_mode)
+{
+ u32 tx_ctrl;
+ u32 recv_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ port_mode->port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ recv_ctrl = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+
+ port_mode->max_frm_size =
+ dsaf_get_dev_field(drv, GMAC_MAX_FRM_SIZE_REG,
+ GMAC_MAX_FRM_SIZE_M, GMAC_MAX_FRM_SIZE_S);
+ port_mode->short_runts_thr =
+ dsaf_get_dev_field(drv, GMAC_SHORT_RUNTS_THR_REG,
+ GMAC_SHORT_RUNTS_THR_M,
+ GMAC_SHORT_RUNTS_THR_S);
+
+ port_mode->pad_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_PAD_EN_B);
+ port_mode->crc_add = dsaf_get_bit(tx_ctrl, GMAC_TX_CRC_ADD_B);
+ port_mode->an_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_AN_EN_B);
+
+ port_mode->runt_pkt_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_RUNT_PKT_EN_B);
+ port_mode->strip_pad_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_STRIP_PAD_EN_B);
+}
+
+static void hns_gmac_pause_frm_cfg(void *mac_drv, u32 rx_pause_en,
+ u32 tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B, !!rx_pause_en);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B, !!tx_pause_en);
+ dsaf_write_dev(drv, GMAC_PAUSE_EN_REG, pause_en);
+}
+
+static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
+ u32 *tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+
+ *rx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B);
+ *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
+}
+
+static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+ GMAC_DUPLEX_TYPE_B, !!full_duplex);
+
+ switch (speed) {
+ case MAC_SPEED_10:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x6);
+ break;
+ case MAC_SPEED_100:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x7);
+ break;
+ case MAC_SPEED_1000:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x8);
+ break;
+ default:
+ dev_err(drv->dev,
+ "hns_gmac_adjust_link fail, speed%d mac%d\n",
+ speed, drv->mac_id);
+ return -EINVAL;
+ }
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+
+ dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+ GMAC_MODE_CHANGE_EB_B, 1);
+
+ return 0;
+}
+
+static void hns_gmac_init(void *mac_drv)
+{
+ u32 port;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ port = drv->mac_id;
+
+ hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0);
+ mdelay(10);
+ hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1);
+ mdelay(10);
+ hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+ hns_gmac_tx_loop_pkt_dis(mac_drv);
+}
+
+static void hns_gmac_update_stats(void *mac_drv)
+{
+ struct mac_hw_stats *hw_stats = NULL;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ /* RX */
+ hw_stats->rx_good_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ hw_stats->rx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ hw_stats->rx_uc_pkts += dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ hw_stats->rx_mc_pkts += dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ hw_stats->rx_bc_pkts += dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ hw_stats->rx_64bytes
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ hw_stats->rx_65to127
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ hw_stats->rx_128to255
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ hw_stats->rx_256to511
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ hw_stats->rx_512to1023
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->rx_1024to1518
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->rx_1519tomax
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->rx_fcs_err += dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ hw_stats->rx_vlan_pkts += dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ hw_stats->rx_data_err += dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ hw_stats->rx_align_err
+ += dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ hw_stats->rx_oversize
+ += dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ hw_stats->rx_jabber_err
+ += dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ hw_stats->rx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ hw_stats->rx_unknown_ctrl
+ += dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ hw_stats->rx_long_err
+ += dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ hw_stats->rx_minto64
+ += dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ hw_stats->rx_under_min
+ += dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ hw_stats->rx_filter_pkts
+ += dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ hw_stats->rx_filter_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+ hw_stats->rx_fifo_overrun_err
+ += dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ hw_stats->rx_len_err
+ += dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ hw_stats->rx_comma_err
+ += dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ /* TX */
+ hw_stats->tx_good_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ hw_stats->tx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ hw_stats->tx_uc_pkts += dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ hw_stats->tx_mc_pkts += dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ hw_stats->tx_bc_pkts += dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ hw_stats->tx_64bytes
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ hw_stats->tx_65to127
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ hw_stats->tx_128to255
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ hw_stats->tx_256to511
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ hw_stats->tx_512to1023
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->tx_1024to1518
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->tx_1519tomax
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->tx_jabber_err
+ += dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ hw_stats->tx_underrun_err
+ += dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ hw_stats->tx_vlan += dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ hw_stats->tx_crc_err += dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ hw_stats->tx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+}
+
+static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ if (drv->mac_id >= DSAF_SERVICE_NW_NUM) {
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG, high_val);
+ }
+}
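+
+/* hns_gmac_set_mac_addr() splits the station address across two
+ * registers: for 00:11:22:33:44:55 the writes above yield
+ * high_val = 0x0011 (octets 0-1) and low_val = 0x22334455
+ * (octets 2-5). Only debug ports (mac_id >= DSAF_SERVICE_NW_NUM)
+ * program these registers here; service-port addresses go through
+ * the DSAF MAC tables (see hns_mac_change_vf_addr()).
+ */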
+
+static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ switch (loop_mode) {
+ case MAC_INTERNALLOOP_MAC:
+ dsaf_set_dev_bit(drv, GMAC_LOOP_REG, GMAC_LP_REG_CF2MI_LP_EN_B,
+ !!enable);
+ break;
+ default:
+ dev_err(drv->dev, "loop_mode error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
+static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *mac_id = drv->mac_id;
+}
+
+static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ enum hns_gmac_duplex_mdoe duplex;
+ enum hns_port_mode speed;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 rx;
+ u32 tx;
+ u16 fc_tx_timer;
+ struct hns_gmac_port_mode_cfg port_mode = { GMAC_10M_MII, 0 };
+
+ hns_gmac_port_mode_get(mac_drv, &port_mode);
+ mac_info->pad_and_crc_en = port_mode.crc_add && port_mode.pad_enable;
+ mac_info->auto_neg = port_mode.an_enable;
+
+ hns_gmac_get_tx_auto_pause_frames(mac_drv, &fc_tx_timer);
+ mac_info->tx_pause_time = fc_tx_timer;
+
+ hns_gmac_get_en(mac_drv, &rx, &tx);
+ mac_info->port_en = rx && tx;
+
+ hns_gmac_get_duplex_type(mac_drv, &duplex);
+ mac_info->duplex = duplex;
+
+ hns_gmac_get_port_mode(mac_drv, &speed);
+ switch (speed) {
+ case GMAC_10M_SGMII:
+ mac_info->speed = MAC_SPEED_10;
+ break;
+ case GMAC_100M_SGMII:
+ mac_info->speed = MAC_SPEED_100;
+ break;
+ case GMAC_1000M_SGMII:
+ mac_info->speed = MAC_SPEED_1000;
+ break;
+ default:
+ mac_info->speed = 0;
+ break;
+ }
+
+ hns_gmac_get_pausefrm_cfg(mac_drv, &rx_pause, &tx_pause);
+ mac_info->rx_pause_en = rx_pause;
+ mac_info->tx_pause_en = tx_pause;
+}
+
+static void hns_gmac_autoneg_stat(void *mac_drv, u32 *enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *enable = dsaf_get_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B);
+}
+
+static void hns_gmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_get_dev_bit(drv, GMAC_AN_NEG_STATE_REG,
+ GMAC_AN_NEG_STAT_RX_SYNC_OK_B);
+}
+
+static void hns_gmac_get_regs(void *mac_drv, void *data)
+{
+ u32 *regs = data;
+ int i;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, GMAC_DUPLEX_TYPE_REG);
+ regs[1] = dsaf_read_dev(drv, GMAC_FD_FC_TYPE_REG);
+ regs[2] = dsaf_read_dev(drv, GMAC_FC_TX_TIMER_REG);
+ regs[3] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_LOW_REG);
+ regs[4] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_HIGH_REG);
+ regs[5] = dsaf_read_dev(drv, GMAC_IPG_TX_TIMER_REG);
+ regs[6] = dsaf_read_dev(drv, GMAC_PAUSE_THR_REG);
+ regs[7] = dsaf_read_dev(drv, GMAC_MAX_FRM_SIZE_REG);
+ regs[8] = dsaf_read_dev(drv, GMAC_PORT_MODE_REG);
+ regs[9] = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ regs[10] = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ regs[11] = dsaf_read_dev(drv, GMAC_SHORT_RUNTS_THR_REG);
+ regs[12] = dsaf_read_dev(drv, GMAC_AN_NEG_STATE_REG);
+ regs[13] = dsaf_read_dev(drv, GMAC_TX_LOCAL_PAGE_REG);
+ regs[14] = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, GMAC_REC_FILT_CONTROL_REG);
+ regs[16] = dsaf_read_dev(drv, GMAC_PTP_CONFIG_REG);
+
+ /* rx static registers */
+ regs[17] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ regs[18] = dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ regs[19] = dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ regs[20] = dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ regs[21] = dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ regs[22] = dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ regs[23] = dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ regs[24] = dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ regs[25] = dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ regs[26] = dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ regs[27] = dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ regs[28] = dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ regs[29] = dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ regs[30] = dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ regs[31] = dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ regs[32] = dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ regs[33] = dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ regs[34] = dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ regs[35] = dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ regs[36] = dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ regs[37] = dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ regs[38] = dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ regs[39] = dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ regs[40] = dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ regs[41] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+
+ /* tx static registers */
+ regs[42] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ regs[43] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ regs[44] = dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ regs[45] = dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ regs[46] = dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ regs[47] = dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ regs[48] = dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ regs[49] = dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ regs[50] = dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ regs[51] = dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ regs[52] = dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ regs[53] = dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ regs[54] = dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ regs[55] = dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ regs[56] = dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ regs[57] = dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ regs[58] = dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+
+ regs[59] = dsaf_read_dev(drv, GAMC_RX_MAX_FRAME);
+ regs[60] = dsaf_read_dev(drv, GMAC_LINE_LOOP_BACK_REG);
+ regs[61] = dsaf_read_dev(drv, GMAC_CF_CRC_STRIP_REG);
+ regs[62] = dsaf_read_dev(drv, GMAC_MODE_CHANGE_EN_REG);
+ regs[63] = dsaf_read_dev(drv, GMAC_SIXTEEN_BIT_CNTR_REG);
+ regs[64] = dsaf_read_dev(drv, GMAC_LD_LINK_COUNTER_REG);
+ regs[65] = dsaf_read_dev(drv, GMAC_LOOP_REG);
+ regs[66] = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, GMAC_VLAN_CODE_REG);
+ regs[68] = dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ regs[69] = dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ regs[70] = dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ regs[71] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_0_REG);
+ regs[72] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_0_REG);
+ regs[73] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_1_REG);
+ regs[74] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_1_REG);
+ regs[75] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_2_REG);
+ regs[76] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ regs[77] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_3_REG);
+ regs[78] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_3_REG);
+ regs[79] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_4_REG);
+ regs[80] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_4_REG);
+ regs[81] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_5_REG);
+ regs[82] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_5_REG);
+ regs[83] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_0_REG);
+ regs[84] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_0_REG);
+ regs[85] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_1_REG);
+ regs[86] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_1_REG);
+ regs[87] = dsaf_read_dev(drv, GMAC_MAC_SKIP_LEN_REG);
+ regs[88] = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+
+ /* mark end of mac regs */
+ for (i = 89; i < 96; i++)
+ regs[i] = 0xaaaaaaaa;
+}
+
+static void hns_gmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_gmac_stats_string[i].offset);
+ }
+}
+
+static void hns_gmac_get_strings(u32 stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s", g_gmac_stats_string[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+}
+
+static int hns_gmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ARRAY_SIZE(g_gmac_stats_string);
+
+ return 0;
+}
+
+static int hns_gmac_get_regs_count(void)
+{
+ return ETH_GMAC_DUMP_NUM;
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
+ mac_drv->mac_init = hns_gmac_init;
+ mac_drv->mac_enable = hns_gmac_enable;
+ mac_drv->mac_disable = hns_gmac_disable;
+ mac_drv->mac_free = hns_gmac_free;
+ mac_drv->adjust_link = hns_gmac_adjust_link;
+ mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_gmac_set_mac_addr;
+ mac_drv->set_an_mode = hns_gmac_config_an_mode;
+ mac_drv->config_loopback = hns_gmac_config_loopback;
+ mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
+ mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
+ mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
+ mac_drv->mac_get_id = hns_gmac_get_id;
+ mac_drv->get_info = hns_gmac_get_info;
+ mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
+ mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_gmac_get_link_status;
+ mac_drv->get_regs = hns_gmac_get_regs;
+ mac_drv->get_regs_count = hns_gmac_get_regs_count;
+ mac_drv->get_ethtool_stats = hns_gmac_get_stats;
+ mac_drv->get_sset_count = hns_gmac_get_sset_count;
+ mac_drv->get_strings = hns_gmac_get_strings;
+ mac_drv->update_stats = hns_gmac_update_stats;
+
+ return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
new file mode 100644
index 000000000000..44fe3010dc6d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_GMAC_H
+#define _HNS_GMAC_H
+
+#include "hns_dsaf_mac.h"
+
+enum hns_port_mode {
+ GMAC_10M_MII = 0,
+ GMAC_100M_MII,
+ GMAC_1000M_GMII,
+ GMAC_10M_RGMII,
+ GMAC_100M_RGMII,
+ GMAC_1000M_RGMII,
+ GMAC_10M_SGMII,
+ GMAC_100M_SGMII,
+ GMAC_1000M_SGMII,
+ GMAC_10000M_SGMII /* 10GE */
+};
+
+enum hns_gmac_duplex_mdoe {
+ GMAC_HALF_DUPLEX_MODE = 0,
+ GMAC_FULL_DUPLEX_MODE
+};
+
+struct hns_gmac_port_mode_cfg {
+ enum hns_port_mode port_mode;
+ u32 max_frm_size;
+ u32 short_runts_thr;
+ u32 pad_enable;
+ u32 crc_add;
+ u32 an_enable; /* auto-negotiation enable */
+ u32 runt_pkt_en;
+ u32 strip_pad_en;
+};
+
+#define ETH_GMAC_DUMP_NUM 96
+#endif /* _HNS_GMAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
new file mode 100644
index 000000000000..026b38676cba
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -0,0 +1,902 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+
+#define MAC_EN_FLAG_V 0xada0328
+
+static const u16 mac_phy_to_speed[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = MAC_SPEED_10000
+};
+
+static const enum mac_mode g_mac_mode_100[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_MODE_MII_100,
+ [PHY_INTERFACE_MODE_RMII] = MAC_MODE_RMII_100
+};
+
+static const enum mac_mode g_mac_mode_1000[] = {
+ [PHY_INTERFACE_MODE_GMII] = MAC_MODE_GMII_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_MODE_SGMII_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_MODE_TBI_1000,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
+};
+
+static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
+{
+ switch (mac_cb->max_speed) {
+ case MAC_SPEED_100:
+ return g_mac_mode_100[mac_cb->phy_if];
+ case MAC_SPEED_1000:
+ return g_mac_mode_1000[mac_cb->phy_if];
+ case MAC_SPEED_10000:
+ return MAC_MODE_XGMII_10000;
+ default:
+ return MAC_MODE_MII_100;
+ }
+}
+
+static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+{
+ switch (mac_cb->max_speed) {
+ case MAC_SPEED_100:
+ return g_mac_mode_100[mac_cb->phy_if];
+ case MAC_SPEED_1000:
+ return g_mac_mode_1000[mac_cb->phy_if];
+ case MAC_SPEED_10000:
+ return MAC_MODE_XGMII_10000;
+ default:
+ return MAC_MODE_MII_100;
+ }
+}
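+
+/* Both helpers above perform the same table lookup, keyed on max_speed:
+ * e.g. a mac_cb with max_speed == MAC_SPEED_1000 and
+ * phy_if == PHY_INTERFACE_MODE_SGMII resolves to MAC_MODE_SGMII_1000,
+ * while any unrecognized speed falls back to MAC_MODE_MII_100.
+ */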
+
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ if (!mac_cb->cpld_vaddr)
+ return -ENODEV;
+
+ *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
+ + MAC_SFP_PORT_OFFSET);
+
+ return 0;
+}
+
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+{
+ struct mac_driver *mac_ctrl_drv;
+ int ret, sfp_prsnt;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_link_status)
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, link_status);
+ else
+ *link_status = 0;
+
+ ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
+
+ mac_cb->link = *link_status;
+}
+
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct mac_driver *mac_ctrl_drv;
+ struct mac_info info;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (!mac_ctrl_drv->get_info)
+ return -ENODEV;
+
+ mac_ctrl_drv->get_info(mac_ctrl_drv, &info);
+ if (auto_neg)
+ *auto_neg = info.auto_neg;
+ if (speed)
+ *speed = info.speed;
+ if (duplex)
+ *duplex = info.duplex;
+
+ return 0;
+}
+
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv;
+
+ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+ mac_cb->speed = speed;
+ mac_cb->half_duplex = !duplex;
+ mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
+
+ if (mac_ctrl_drv->adjust_link) {
+ ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
+ (enum mac_speed)speed, duplex);
+ if (ret) {
+ dev_err(mac_cb->dev,
+ "adjust_link failed,%s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return;
+ }
+ }
+}
+
+/**
+ *hns_mac_get_inner_port_num - get mac table inner port number
+ *@mac_cb: mac device
+ *@vmid: vm id
+ *@port_num: port number
+ */
+static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+ u8 vmid, u8 *port_num)
+{
+ u8 tmp_port;
+ u32 comm_idx;
+
+ if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
+ if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+ dev_err(mac_cb->dev,
+ "input invalid,%s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
+ if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
+ dev_err(mac_cb->dev,
+ "input invalid,%s mac%d vmid%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+
+ comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
+
+ if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+ dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+
+ switch (mac_cb->dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ tmp_port = vmid;
+ break;
+ default:
+ dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+ tmp_port += DSAF_BASE_INNER_PORT_NUM;
+
+ *port_num = tmp_port;
+
+ return 0;
+}
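+
+/* In the VM-enabled modes the mapping is a simple offset: e.g. in
+ * DSAF_MODE_DISABLE_6PORT_16VM, vmid 3 yields
+ * *port_num = DSAF_BASE_INNER_PORT_NUM + 3, while the FIX modes
+ * always map to DSAF_BASE_INNER_PORT_NUM itself.
+ */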
+
+/**
+ *hns_mac_change_vf_addr - change vf mac address
+ *@mac_cb: mac device
+ *@vmid: vmid
+ *@addr: mac address
+ */
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
+ u32 vmid, char *addr)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ struct mac_entry_idx *old_entry;
+
+ old_entry = &mac_cb->addr_entry_idx[vmid];
+ if (dsaf_dev) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = old_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, (u8)vmid,
+ &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ if ((old_entry->valid != 0) &&
+ (memcmp(old_entry->addr,
+ addr, sizeof(mac_entry.addr)) != 0)) {
+ ret = hns_dsaf_del_mac_entry(dsaf_dev,
+ old_entry->vlan_id,
+ mac_cb->mac_id,
+ old_entry->addr);
+ if (ret)
+ return ret;
+ }
+
+ ret = hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+ if (ret)
+ return ret;
+ }
+
+ if ((mac_ctrl_drv->set_mac_addr) && (vmid == 0))
+ mac_ctrl_drv->set_mac_addr(mac_cb->priv.mac, addr);
+
+ memcpy(old_entry->addr, addr, sizeof(old_entry->addr));
+ old_entry->valid = 1;
+ return 0;
+}
+
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, u8 en)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (dsaf_dev && addr) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = 0;/*vlan_id;*/
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (en == DISABLE)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "set mac mc port failed,%s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ *hns_mac_del_mac - delete a mac address from the dsaf table; the same
+ * address can't be deleted twice
+ *@mac_cb: mac device
+ *@vfn: vf number
+ *@mac: mac address
+ *return status
+ */
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
+{
+ struct mac_entry_idx *old_mac;
+ struct dsaf_device *dsaf_dev;
+ u32 ret;
+
+ dsaf_dev = mac_cb->dsaf_dev;
+
+ if (vfn < DSAF_MAX_VM_NUM) {
+ old_mac = &mac_cb->addr_entry_idx[vfn];
+ } else {
+ dev_err(mac_cb->dev,
+ "vf queue is too large,%s mac%d queue = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
+ return -EINVAL;
+ }
+
+ if (dsaf_dev) {
+ ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
+ mac_cb->mac_id, old_mac->addr);
+ if (ret)
+ return ret;
+
+ if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
+ old_mac->valid = 0;
+ }
+
+ return 0;
+}
+
+static void hns_mac_param_get(struct mac_params *param,
+ struct hns_mac_cb *mac_cb)
+{
+ param->vaddr = (void *)mac_cb->vaddr;
+ param->mac_mode = hns_get_enet_interface(mac_cb);
+ memcpy(param->addr, mac_cb->addr_entry_idx[0].addr,
+ MAC_NUM_OCTETS_PER_ADDR);
+ param->mac_id = mac_cb->mac_id;
+ param->dev = mac_cb->dev;
+}
+
+/**
+ *hns_mac_port_config_bc_en - set broadcast rx&tx enable
+ *@mac_cb: mac device
+ *@port_num: port number
+ *@vlan_id: vlan id
+ *@en: enable
+ *return 0 - success, negative - fail
+ */
+static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
+ u32 port_num, u16 vlan_id, u8 en)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+ = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ /* directly return ok in debug network mode */
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ if (dsaf_dev) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (en == DISABLE)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ *hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ *@mac_cb: mac device
+ *@vmid: vm id
+ *@en: enable
+ *return 0 - success, negative - fail
+ */
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 port_num;
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+ = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct mac_entry_idx *uc_mac_entry;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
+
+ if (dsaf_dev) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vmid, &port_num);
+ if (ret)
+ return ret;
+ mac_entry.port_num = port_num;
+
+ if (en == DISABLE)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+void hns_mac_reset(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *drv;
+
+ drv = hns_mac_get_drv(mac_cb);
+
+ drv->mac_init(drv);
+
+ if (drv->config_max_frame_length)
+ drv->config_max_frame_length(drv, mac_cb->max_frm);
+
+ if (drv->set_tx_auto_pause_frames)
+ drv->set_tx_auto_pause_frames(drv, mac_cb->tx_pause_frm_time);
+
+ if (drv->set_an_mode)
+ drv->set_an_mode(drv, 1);
+
+ if (drv->mac_pausefrm_cfg) {
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ drv->mac_pausefrm_cfg(drv, 0, 0);
+ else /* mac rx must stay disabled; dsaf pfc handles it instead */
+ drv->mac_pausefrm_cfg(drv, 0, 1);
+ }
+}
+
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ u32 buf_size = mac_cb->dsaf_dev->buf_size;
+ u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((new_mtu < MAC_MIN_MTU) || (new_frm > MAC_MAX_MTU) ||
+ (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
+ return -EINVAL;
+
+ if (!drv->config_max_frame_length)
+ return -ECHILD;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (new_frm < (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN))
+ new_frm = (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN);
+
+ drv->config_max_frame_length(drv, new_frm);
+
+ mac_cb->max_frm = new_frm;
+
+ return 0;
+}
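+
+/* Worked example: a standard 1500-byte MTU gives
+ * new_frm = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) + VLAN_HLEN(4) = 1522,
+ * which is exactly the floor enforced above, so any smaller MTU still
+ * programs a 1522-octet max frame length into the hardware.
+ */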
+
+void hns_mac_start(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_drv = hns_mac_get_drv(mac_cb);
+
+ /* for virt */
+ if (mac_drv->mac_en_flg == MAC_EN_FLAG_V) {
+ /*plus 1 when the virtual mac has been enabled */
+ mac_drv->virt_dev_num += 1;
+ return;
+ }
+
+ if (mac_drv->mac_enable) {
+ mac_drv->mac_enable(mac_cb->priv.mac, MAC_COMM_MODE_RX_AND_TX);
+ mac_drv->mac_en_flg = MAC_EN_FLAG_V;
+ }
+}
+
+void hns_mac_stop(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ /*modified for virtualization */
+ if (mac_ctrl_drv->virt_dev_num > 0) {
+ mac_ctrl_drv->virt_dev_num -= 1;
+ if (mac_ctrl_drv->virt_dev_num > 0)
+ return;
+ }
+
+ if (mac_ctrl_drv->mac_disable)
+ mac_ctrl_drv->mac_disable(mac_cb->priv.mac,
+ MAC_COMM_MODE_RX_AND_TX);
+
+ mac_ctrl_drv->mac_en_flg = 0;
+ mac_cb->link = 0;
+ cpld_led_reset(mac_cb);
+}
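+
+/* hns_mac_start()/hns_mac_stop() are reference counted for virtual
+ * devices: the first start enables the hardware and latches
+ * mac_en_flg to MAC_EN_FLAG_V, later starts only bump virt_dev_num,
+ * and stop disables the MAC only once the last virtual user is gone.
+ */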
+
+/**
+ * hns_mac_get_autoneg - get autonegotiation status
+ * @mac_cb: mac control block
+ * @auto_neg: autonegotiation state, 0 or 1
+ */
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->autoneg_stat)
+ mac_ctrl_drv->autoneg_stat(mac_ctrl_drv, auto_neg);
+ else
+ *auto_neg = 0;
+}
+
+/**
+ * hns_mac_get_pauseparam - get rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable status
+ * @tx_en: tx enable status
+ */
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_pause_enable) {
+ mac_ctrl_drv->get_pause_enable(mac_ctrl_drv, rx_en, tx_en);
+ } else {
+ *rx_en = 0;
+ *tx_en = 0;
+ }
+
+ /* Due to a chip defect, rx pause can't be enabled on the service
+ * mac itself. Always report rx pause as enabled (1): the DSAF
+ * handles rx pause frames on the service mac's behalf, so rx pause
+ * is still effectively supported.
+ */
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ *rx_en = 1;
+}
+
+/**
+ * hns_mac_set_autoneg - set autonegotiation
+ * @mac_cb: mac control block
+ * @enable: enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
+ dev_err(mac_cb->dev, "enable autoneg is not allowed!");
+ return -ENOTSUPP;
+ }
+
+ if (mac_ctrl_drv->set_an_mode)
+ mac_ctrl_drv->set_an_mode(mac_ctrl_drv, enable);
+
+ return 0;
+}
+
+/**
+ * hns_mac_set_pauseparam - set rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable or not
+ * @tx_en: tx enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+ if (!rx_en) {
+ dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
+ return -EINVAL;
+ }
+ } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ if (tx_en || rx_en) {
+ dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(mac_cb->dev, "Unsupport this operation!");
+ return -EINVAL;
+ }
+
+ if (mac_ctrl_drv->mac_pausefrm_cfg)
+ mac_ctrl_drv->mac_pausefrm_cfg(mac_ctrl_drv, rx_en, tx_en);
+
+ return 0;
+}
+
+/**
+ * hns_mac_init_ex - mac init
+ * @mac_cb: mac control block
+ * return 0 - success, negative - fail
+ */
+static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
+{
+ int ret;
+ struct mac_params param;
+ struct mac_driver *drv;
+
+ hns_dsaf_fix_mac_mode(mac_cb);
+
+ memset(&param, 0, sizeof(struct mac_params));
+ hns_mac_param_get(&param, mac_cb);
+
+ if (MAC_SPEED_FROM_MODE(param.mac_mode) < MAC_SPEED_10000)
+ drv = (struct mac_driver *)hns_gmac_config(mac_cb, &param);
+ else
+ drv = (struct mac_driver *)hns_xgmac_config(mac_cb, &param);
+
+ if (!drv)
+ return -ENOMEM;
+
+ mac_cb->priv.mac = (void *)drv;
+ hns_mac_reset(mac_cb);
+
+ hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex);
+
+ ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, ENABLE);
+ if (ret)
+ goto free_mac_drv;
+
+ return 0;
+
+free_mac_drv:
+ drv->mac_free(mac_cb->priv.mac);
+ mac_cb->priv.mac = NULL;
+
+ return ret;
+}
+
+/**
+ *hns_mac_get_info - get mac information from device node
+ *@mac_cb: mac device
+ *@np: device node
+ *@mac_mode_idx: mac mode index
+ */
+static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
+ struct device_node *np, u32 mac_mode_idx)
+{
+ mac_cb->link = false;
+ mac_cb->half_duplex = false;
+ mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
+ mac_cb->max_speed = mac_cb->speed;
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ mac_cb->if_support = MAC_GMAC_SUPPORTED;
+ mac_cb->if_support |= SUPPORTED_1000baseT_Full;
+ } else if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ mac_cb->if_support = SUPPORTED_10000baseR_FEC;
+ mac_cb->if_support |= SUPPORTED_10000baseKR_Full;
+ }
+
+ mac_cb->max_frm = MAC_DEFAULT_MTU;
+ mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+
+ /* Get the rest of the PHY information */
+ mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+ if (mac_cb->phy_node)
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, mac_cb->phy_node->name);
+}
+
+/**
+ * hns_mac_get_mode - get mac mode
+ * @phy_if: phy interface
+ * return 0 - gmac, 1 - xgmac, negative - fail
+ */
+static int hns_mac_get_mode(phy_interface_t phy_if)
+{
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_SGMII:
+ return MAC_GMAC_IDX;
+ case PHY_INTERFACE_MODE_XGMII:
+ return MAC_XGMAC_IDX;
+ default:
+ return -EINVAL;
+ }
+}
+
+u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+{
+ u8 __iomem *base = dsaf_dev->io_base;
+ int mac_id = mac_cb->mac_id;
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ return base + 0x40000 + mac_id * 0x4000 -
+ mac_mode_idx * 0x20000;
+ else
+ return mac_cb->serdes_vaddr + 0x1000
+ + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+}
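+
+/* Example: for a service port with mac_id 2 this resolves to
+ * io_base + 0x48000 in gmac mode (mac_mode_idx == MAC_GMAC_IDX == 0)
+ * and io_base + 0x28000 in xgmac mode (mac_mode_idx == 1); debug
+ * ports are addressed off serdes_vaddr instead.
+ */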
+
+/**
+ * hns_mac_get_cfg - get mac cfg from dtb or acpi table
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_idx: mac index
+ * return 0 - success, negative - fail
+ */
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+{
+ int ret;
+ u32 mac_mode_idx;
+ struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
+
+ mac_cb->dsaf_dev = dsaf_dev;
+ mac_cb->dev = dsaf_dev->dev;
+ mac_cb->mac_id = mac_idx;
+
+ mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
+ mac_cb->serdes_vaddr = dsaf_dev->sds_base;
+
+ if (dsaf_dev->cpld_base &&
+ mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
+ mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
+ mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
+ cpld_led_reset(mac_cb);
+ }
+ mac_cb->sfp_prsnt = 0;
+ mac_cb->txpkt_for_led = 0;
+ mac_cb->rxpkt_for_led = 0;
+
+ if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+ mac_cb->mac_type = HNAE_PORT_SERVICE;
+ else
+ mac_cb->mac_type = HNAE_PORT_DEBUG;
+
+ mac_cb->phy_if = hns_mac_get_phy_if(mac_cb);
+
+ ret = hns_mac_get_mode(mac_cb->phy_if);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "hns_mac_get_mode failed,mac%d ret = %#x!\n",
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ mac_mode_idx = (u32)ret;
+
+ hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+
+ mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
+
+ return 0;
+}
+
+/**
+ * hns_mac_init - init mac
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+int hns_mac_init(struct dsaf_device *dsaf_dev)
+{
+ int i;
+ int ret;
+ size_t size;
+ struct hns_mac_cb *mac_cb;
+
+ size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
+ dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
+ if (!dsaf_dev->mac_cb)
+ return -ENOMEM;
+
+ for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
+ ret = hns_mac_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto free_mac_cb;
+
+ mac_cb = &dsaf_dev->mac_cb[i];
+ ret = hns_mac_init_ex(mac_cb);
+ if (ret)
+ goto free_mac_cb;
+ }
+
+ return 0;
+
+free_mac_cb:
+ dsaf_dev->mac_cb = NULL;
+
+ return ret;
+}
+
+void hns_mac_uninit(struct dsaf_device *dsaf_dev)
+{
+ cpld_led_reset(dsaf_dev->mac_cb);
+ dsaf_dev->mac_cb = NULL;
+}
+
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+ if (drv->config_loopback)
+ ret = drv->config_loopback(drv, loop, en);
+ else
+ ret = -ENOTSUPP;
+
+ return ret;
+}
+
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->update_stats(mac_ctrl_drv);
+}
+
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
+}
+
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
+ int stringset, u8 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_strings(stringset, data);
+}
+
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_sset_count(stringset);
+}
+
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_regs_count();
+}
+
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_regs(mac_ctrl_drv, data);
+}
+
+void hns_set_led_opt(struct hns_mac_cb *mac_cb)
+{
+ int nic_data = 0;
+ int txpkts, rxpkts;
+
+ txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts;
+ rxpkts = mac_cb->rxpkt_for_led - mac_cb->hw_stats.rx_good_pkts;
+ if (txpkts || rxpkts)
+ nic_data = 1;
+ else
+ nic_data = 0;
+ mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
+ mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
+ hns_cpld_set_led(mac_cb, (int)mac_cb->link,
+ mac_cb->speed, nic_data);
+}
+
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ if (!mac_cb || !mac_cb->cpld_vaddr)
+ return 0;
+
+ return cpld_set_led_id(mac_cb, status);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
new file mode 100644
index 000000000000..7da95a7581f9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MAC_H
+#define _HNS_DSAF_MAC_H
+
+#include <linux/phy.h>
+#include <linux/kernel.h>
+#include <linux/if_vlan.h>
+#include "hns_dsaf_main.h"
+
+struct dsaf_device;
+
+#define MAC_GMAC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg)
+
+#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MAC_MAX_MTU 9600
+#define MAC_MIN_MTU 68
+
+#define MAC_DEFAULT_PAUSE_TIME 0xff
+
+#define MAC_GMAC_IDX 0
+#define MAC_XGMAC_IDX 1
+
+#define ETH_STATIC_REG 1
+#define ETH_DUMP_REG 5
+/* check mac addr broadcast */
+#define MAC_IS_BROADCAST(p) ((*(p) == 0xff) && (*((p) + 1) == 0xff) && \
+ (*((p) + 2) == 0xff) && (*((p) + 3) == 0xff) && \
+ (*((p) + 4) == 0xff) && (*((p) + 5) == 0xff))
+
+/* check mac addr is 01-00-5e-xx-xx-xx*/
+#define MAC_IS_L3_MULTICAST(p) ((*((p) + 0) == 0x01) && \
+ (*((p) + 1) == 0x00) && \
+ (*((p) + 2) == 0x5e))
+
+/* check that the mac addr is zero in every bit */
+#define MAC_IS_ALL_ZEROS(p) ((*(p) == 0) && (*((p) + 1) == 0) && \
+ (*((p) + 2) == 0) && (*((p) + 3) == 0) && \
+ (*((p) + 4) == 0) && (*((p) + 5) == 0))
+
+/*check mac addr multicast*/
+#define MAC_IS_MULTICAST(p) ((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
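+
+/* For example, ff:ff:ff:ff:ff:ff satisfies both MAC_IS_BROADCAST()
+ * and MAC_IS_MULTICAST() (the I/G bit of the first octet is set),
+ * while 01:00:5e:01:02:03 matches MAC_IS_L3_MULTICAST().
+ */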
+
+/**< Number of octets (8-bit bytes) in an ethernet address */
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+struct mac_priv {
+ void *mac;
+};
+
+/* net speed */
+enum mac_speed {
+ MAC_SPEED_10 = 10, /**< 10 Mbps */
+ MAC_SPEED_100 = 100, /**< 100 Mbps */
+ MAC_SPEED_1000 = 1000, /**< 1000 Mbps = 1 Gbps */
+ MAC_SPEED_10000 = 10000 /**< 10000 Mbps = 10 Gbps */
+};
+
+/*mac interface keyword */
+enum mac_intf {
+ MAC_IF_NONE = 0x00000000, /**< invalid interface */
+ MAC_IF_MII = 0x00010000, /**< MII interface */
+ MAC_IF_RMII = 0x00020000, /**< RMII interface */
+ MAC_IF_SMII = 0x00030000, /**< SMII interface */
+ MAC_IF_GMII = 0x00040000, /**< GMII interface */
+ MAC_IF_RGMII = 0x00050000, /**< RGMII interface */
+ MAC_IF_TBI = 0x00060000, /**< TBI interface */
+ MAC_IF_RTBI = 0x00070000, /**< RTBI interface */
+ MAC_IF_SGMII = 0x00080000, /**< SGMII interface */
+ MAC_IF_XGMII = 0x00090000, /**< XGMII interface */
+ MAC_IF_QSGMII = 0x000a0000 /**< QSGMII interface */
+};
+
+/*mac mode */
+enum mac_mode {
+ /**< Invalid Ethernet mode */
+ MAC_MODE_INVALID = 0,
+ /**< 10 Mbps MII */
+ MAC_MODE_MII_10 = (MAC_IF_MII | MAC_SPEED_10),
+ /**< 100 Mbps MII */
+ MAC_MODE_MII_100 = (MAC_IF_MII | MAC_SPEED_100),
+ /**< 10 Mbps RMII */
+ MAC_MODE_RMII_10 = (MAC_IF_RMII | MAC_SPEED_10),
+ /**< 100 Mbps RMII */
+ MAC_MODE_RMII_100 = (MAC_IF_RMII | MAC_SPEED_100),
+ /**< 10 Mbps SMII */
+ MAC_MODE_SMII_10 = (MAC_IF_SMII | MAC_SPEED_10),
+ /**< 100 Mbps SMII */
+ MAC_MODE_SMII_100 = (MAC_IF_SMII | MAC_SPEED_100),
+ /**< 1000 Mbps GMII */
+ MAC_MODE_GMII_1000 = (MAC_IF_GMII | MAC_SPEED_1000),
+ /**< 10 Mbps RGMII */
+ MAC_MODE_RGMII_10 = (MAC_IF_RGMII | MAC_SPEED_10),
+ /**< 100 Mbps RGMII */
+ MAC_MODE_RGMII_100 = (MAC_IF_RGMII | MAC_SPEED_100),
+ /**< 1000 Mbps RGMII */
+ MAC_MODE_RGMII_1000 = (MAC_IF_RGMII | MAC_SPEED_1000),
+ /**< 1000 Mbps TBI */
+ MAC_MODE_TBI_1000 = (MAC_IF_TBI | MAC_SPEED_1000),
+ /**< 1000 Mbps RTBI */
+ MAC_MODE_RTBI_1000 = (MAC_IF_RTBI | MAC_SPEED_1000),
+ /**< 10 Mbps SGMII */
+ MAC_MODE_SGMII_10 = (MAC_IF_SGMII | MAC_SPEED_10),
+ /**< 100 Mbps SGMII */
+ MAC_MODE_SGMII_100 = (MAC_IF_SGMII | MAC_SPEED_100),
+ /**< 1000 Mbps SGMII */
+ MAC_MODE_SGMII_1000 = (MAC_IF_SGMII | MAC_SPEED_1000),
+ /**< 10000 Mbps XGMII */
+ MAC_MODE_XGMII_10000 = (MAC_IF_XGMII | MAC_SPEED_10000),
+ /**< 1000 Mbps QSGMII */
+ MAC_MODE_QSGMII_1000 = (MAC_IF_QSGMII | MAC_SPEED_1000)
+};
+
+/*mac communicate mode*/
+enum mac_commom_mode {
+ MAC_COMM_MODE_NONE = 0, /**< No transmit/receive communication */
+ MAC_COMM_MODE_RX = 1, /**< Only receive communication */
+ MAC_COMM_MODE_TX = 2, /**< Only transmit communication */
+ MAC_COMM_MODE_RX_AND_TX = 3 /**< Both tx and rx communication */
+};
+
+/*mac statistics */
+struct mac_statistics {
+ u64 stat_pkts64; /* r-10G tr-DT 64 byte frame counter */
+ u64 stat_pkts65to127; /* r-10G 65 to 127 byte frame counter */
+ u64 stat_pkts128to255; /* r-10G 128 to 255 byte frame counter */
+ u64 stat_pkts256to511; /*r-10G 256 to 511 byte frame counter */
+ u64 stat_pkts512to1023;/* r-10G 512 to 1023 byte frame counter */
+ u64 stat_pkts1024to1518; /* r-10G 1024 to 1518 byte frame counter */
+ u64 stat_pkts1519to1522; /* r-10G 1519 to 1522 byte good frame count*/
+ /* Total number of packets that were less than 64 octets */
+ /* long with a wrong CRC.*/
+ u64 stat_fragments;
+ /* Total number of packets longer than valid maximum length octets */
+ u64 stat_jabbers;
+ /* number of dropped packets due to internal errors of */
+ /* the MAC Client. */
+ u64 stat_drop_events;
+ /* Incremented when frames of correct length but with */
+ /* CRC error are received.*/
+ u64 stat_crc_align_errors;
+ /* Total number of packets that were less than 64 octets */
+ /* long with a good CRC.*/
+ u64 stat_undersize_pkts;
+ u64 stat_oversize_pkts; /**< T,B.D*/
+
+ u64 stat_rx_pause; /**< Pause MAC Control received */
+ u64 stat_tx_pause; /**< Pause MAC Control sent */
+
+ u64 in_octets; /**< Total number of bytes received. */
+ u64 in_pkts; /* Total number of packets received. */
+ u64 in_mcast_pkts; /* Total number of multicast frames received */
+ u64 in_bcast_pkts; /* Total number of broadcast frames received */
+ /* Frames received, but discarded due to */
+ /* problems within the MAC RX. */
+ u64 in_discards;
+ u64 in_errors; /* Number of frames received with error: */
+ /* - FIFO Overflow Error */
+ /* - CRC Error */
+ /* - Frame Too Long Error */
+ /* - Alignment Error */
+ u64 out_octets; /* Total number of bytes sent. */
+ u64 out_pkts; /**< Total number of packets sent. */
+ u64 out_mcast_pkts; /* Total number of multicast frames sent */
+ u64 out_bcast_pkts; /* Total number of broadcast frames sent */
+ /* Frames received, but discarded due to problems within */
+ /* the MAC TX N/A!.*/
+ u64 out_discards;
+ u64 out_errors; /*Number of frames transmitted with error: */
+ /* - FIFO Overflow Error */
+ /* - FIFO Underflow Error */
+ /* - Other */
+};
+
+/* mac parameter struct; the mac gets its parameters from the nic or dsaf at init time */
+struct mac_params {
+ char addr[MAC_NUM_OCTETS_PER_ADDR];
+ void *vaddr; /*virtual address*/
+ struct device *dev;
+ u8 mac_id;
+ /**< Ethernet operation mode (MAC-PHY interface and speed) */
+ enum mac_mode mac_mode;
+};
+
+struct mac_info {
+ u16 speed; /* The forced speed (lower bits) in Mbps. Please use
+ * ethtool_cmd_speed()/_set() to access it.
+ */
+ u8 duplex; /* Duplex, half or full */
+ u8 auto_neg; /* Enable or disable autonegotiation */
+ enum hnae_loop loop_mode;
+ u8 tx_pause_en;
+ u8 tx_pause_time;
+ u8 rx_pause_en;
+ u8 pad_and_crc_en;
+ u8 promiscuous_en;
+ u8 port_en; /*port enable*/
+};
+
+struct mac_entry_idx {
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u16 vlan_id:12;
+ u16 valid:1;
+ u16 qos:3;
+};
+
+struct mac_hw_stats {
+ u64 rx_good_pkts; /* only for xgmac */
+ u64 rx_good_bytes;
+ u64 rx_total_pkts; /* only for xgmac */
+ u64 rx_total_bytes; /* only for xgmac */
+ u64 rx_bad_bytes; /* only for gmac */
+ u64 rx_uc_pkts;
+ u64 rx_mc_pkts;
+ u64 rx_bc_pkts;
+ u64 rx_fragment_err; /* only for xgmac */
+ u64 rx_undersize; /* only for xgmac */
+ u64 rx_under_min;
+ u64 rx_minto64; /* only for gmac */
+ u64 rx_64bytes;
+ u64 rx_65to127;
+ u64 rx_128to255;
+ u64 rx_256to511;
+ u64 rx_512to1023;
+ u64 rx_1024to1518;
+ u64 rx_1519tomax;
+ u64 rx_1519tomax_good; /* only for xgmac */
+ u64 rx_oversize;
+ u64 rx_jabber_err;
+ u64 rx_fcs_err;
+ u64 rx_vlan_pkts; /* only for gmac */
+ u64 rx_data_err; /* only for gmac */
+ u64 rx_align_err; /* only for gmac */
+ u64 rx_long_err; /* only for gmac */
+ u64 rx_pfc_tc0;
+ u64 rx_pfc_tc1; /* only for xgmac */
+ u64 rx_pfc_tc2; /* only for xgmac */
+ u64 rx_pfc_tc3; /* only for xgmac */
+ u64 rx_pfc_tc4; /* only for xgmac */
+ u64 rx_pfc_tc5; /* only for xgmac */
+ u64 rx_pfc_tc6; /* only for xgmac */
+ u64 rx_pfc_tc7; /* only for xgmac */
+ u64 rx_unknown_ctrl;
+ u64 rx_filter_pkts; /* only for gmac */
+ u64 rx_filter_bytes; /* only for gmac */
+ u64 rx_fifo_overrun_err;/* only for gmac */
+ u64 rx_len_err; /* only for gmac */
+ u64 rx_comma_err; /* only for gmac */
+ u64 rx_symbol_err; /* only for xgmac */
+ u64 tx_good_to_sw; /* only for xgmac */
+ u64 tx_bad_to_sw; /* only for xgmac */
+ u64 rx_1731_pkts; /* only for xgmac */
+
+ u64 tx_good_bytes;
+ u64 tx_good_pkts; /* only for xgmac */
+ u64 tx_total_bytes; /* only for xgmac */
+ u64 tx_total_pkts; /* only for xgmac */
+ u64 tx_bad_bytes; /* only for gmac */
+ u64 tx_bad_pkts; /* only for xgmac */
+ u64 tx_uc_pkts;
+ u64 tx_mc_pkts;
+ u64 tx_bc_pkts;
+ u64 tx_undersize; /* only for xgmac */
+ u64 tx_fragment_err; /* only for xgmac */
+ u64 tx_under_min_pkts; /* only for gmac */
+ u64 tx_64bytes;
+ u64 tx_65to127;
+ u64 tx_128to255;
+ u64 tx_256to511;
+ u64 tx_512to1023;
+ u64 tx_1024to1518;
+ u64 tx_1519tomax;
+ u64 tx_1519tomax_good; /* only for xgmac */
+ u64 tx_oversize; /* only for xgmac */
+ u64 tx_jabber_err;
+ u64 tx_underrun_err; /* only for gmac */
+ u64 tx_vlan; /* only for gmac */
+ u64 tx_crc_err; /* only for gmac */
+ u64 tx_pfc_tc0;
+ u64 tx_pfc_tc1; /* only for xgmac */
+ u64 tx_pfc_tc2; /* only for xgmac */
+ u64 tx_pfc_tc3; /* only for xgmac */
+ u64 tx_pfc_tc4; /* only for xgmac */
+ u64 tx_pfc_tc5; /* only for xgmac */
+ u64 tx_pfc_tc6; /* only for xgmac */
+ u64 tx_pfc_tc7; /* only for xgmac */
+ u64 tx_ctrl; /* only for xgmac */
+ u64 tx_1731_pkts; /* only for xgmac */
+ u64 tx_1588_pkts; /* only for xgmac */
+ u64 rx_good_from_sw; /* only for xgmac */
+ u64 rx_bad_from_sw; /* only for xgmac */
+};
+
+struct hns_mac_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ struct mac_priv priv;
+ u8 __iomem *vaddr;
+ u8 __iomem *cpld_vaddr;
+ u8 __iomem *sys_ctl_vaddr;
+ u8 __iomem *serdes_vaddr;
+ struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
+ u8 sfp_prsnt;
+ u8 cpld_led_value;
+ u8 mac_id;
+
+ u8 link;
+ u8 half_duplex;
+ u16 speed;
+ u16 max_speed;
+ u16 max_frm;
+ u16 tx_pause_frm_time;
+ u32 if_support;
+ u64 txpkt_for_led;
+ u64 rxpkt_for_led;
+ enum hnae_port_type mac_type;
+ phy_interface_t phy_if;
+ enum hnae_loop loop_mode;
+
+ struct device_node *phy_node;
+
+ struct mac_hw_stats hw_stats;
+};
+
+struct mac_driver {
+ /*init Mac when init nic or dsaf*/
+ void (*mac_init)(void *mac_drv);
+ /*remove mac when remove nic or dsaf*/
+ void (*mac_free)(void *mac_drv);
+ /*enable mac when enable nic or dsaf*/
+ void (*mac_enable)(void *mac_drv, enum mac_commom_mode mode);
+ /*disable mac when disable nic or dsaf*/
+ void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
+ /* config mac address*/
+ void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+ /*adjust mac mode of port,include speed and duplex*/
+ int (*adjust_link)(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex);
+ /* config autonegotiation mode of port */
+ void (*set_an_mode)(void *mac_drv, u8 enable);
+ /* config loopback mode */
+ int (*config_loopback)(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable);
+ /* config mtu*/
+ void (*config_max_frame_length)(void *mac_drv, u16 newval);
+ /*config PAD and CRC enable */
+ void (*config_pad_and_crc)(void *mac_drv, u8 newval);
+ /* config duplex mode*/
+ void (*config_half_duplex)(void *mac_drv, u8 newval);
+ /* config tx pause time; if pause_time is zero, disable tx pause */
+ void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time);
+ /*config rx pause enable*/
+ void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
+ /* config rx mode for promiscuous*/
+ int (*set_promiscuous)(void *mac_drv, u8 enable);
+ /* get mac id */
+ void (*mac_get_id)(void *mac_drv, u8 *mac_id);
+ void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
+
+ void (*autoneg_stat)(void *mac_drv, u32 *enable);
+ int (*set_pause_enable)(void *mac_drv, u32 rx_en, u32 tx_en);
+ void (*get_pause_enable)(void *mac_drv, u32 *rx_en, u32 *tx_en);
+ void (*get_link_status)(void *mac_drv, u32 *link_stat);
+ /* get the important regs */
+ void (*get_regs)(void *mac_drv, void *data);
+ int (*get_regs_count)(void);
+ /* get strings name for ethtool statistic */
+ void (*get_strings)(u32 stringset, u8 *data);
+ /* get the number of strings*/
+ int (*get_sset_count)(int stringset);
+
+ /* get the statistic by ethtools*/
+ void (*get_ethtool_stats)(void *mac_drv, u64 *data);
+
+ /* get mac information */
+ void (*get_info)(void *mac_drv, struct mac_info *mac_info);
+
+ void (*update_stats)(void *mac_drv);
+
+ enum mac_mode mac_mode;
+ u8 mac_id;
+ struct hns_mac_cb *mac_cb;
+ void __iomem *io_base;
+ unsigned int mac_en_flg; /* guards against enabling the mac twice */
+ unsigned int virt_dev_num;
+ struct device *dev;
+};
+
+struct mac_stats_string {
+ char desc[64];
+ unsigned long offset;
+};
+
+#define MAC_MAKE_MODE(interface, speed) (enum mac_mode)((interface) | (speed))
+#define MAC_INTERFACE_FROM_MODE(mode) (enum mac_intf)((mode) & 0xFFFF0000)
+#define MAC_SPEED_FROM_MODE(mode) (enum mac_speed)((mode) & 0x0000FFFF)
+#define MAC_STATS_FIELD_OFF(field) (offsetof(struct mac_hw_stats, field))
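+
+/* The mode is a plain bitwise split (interface in the upper 16 bits,
+ * speed in the lower 16), so for example:
+ *   MAC_MAKE_MODE(MAC_IF_SGMII, MAC_SPEED_1000)   == MAC_MODE_SGMII_1000
+ *   MAC_SPEED_FROM_MODE(MAC_MODE_SGMII_1000)      == MAC_SPEED_1000
+ *   MAC_INTERFACE_FROM_MODE(MAC_MODE_SGMII_1000)  == MAC_IF_SGMII
+ */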
+
+static inline struct mac_driver *hns_mac_get_drv(
+ const struct hns_mac_cb *mac_cb)
+{
+ return (struct mac_driver *)(mac_cb->priv.mac);
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+
+int hns_mac_init(struct dsaf_device *dsaf_dev);
+void mac_adjust_link(struct net_device *net_dev);
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, u8 en);
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, u8 en);
+void hns_mac_start(struct hns_mac_cb *mac_cb);
+void hns_mac_stop(struct hns_mac_cb *mac_cb);
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
+void hns_mac_uninit(struct dsaf_device *dsaf_dev);
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_reset(struct hns_mac_cb *mac_cb);
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en);
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
+void hns_set_led_opt(struct hns_mac_cb *mac_cb);
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
new file mode 100644
index 000000000000..2a98eba660c0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -0,0 +1,2474 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/device.h>
+#include <linux/vmalloc.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_mac.h"
+
+const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+ [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
+ [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
+ [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+};
+
+int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+{
+ int ret, i;
+ u32 desc_num;
+ u32 buf_size;
+ const char *name, *mode_str;
+ struct device_node *np = dsaf_dev->dev->of_node;
+
+ if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v2"))
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ else
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+
+ ret = of_property_read_string(np, "dsa_name", &name);
+ if (ret) {
+ dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
+ return ret;
+ }
+ strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
+ dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
+
+ ret = of_property_read_string(np, "mode", &mode_str);
+ if (ret) {
+ dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
+ return ret;
+ }
+ for (i = 0; i < DSAF_MODE_MAX; i++) {
+ if (g_dsaf_mode_match[i] &&
+ !strcmp(mode_str, g_dsaf_mode_match[i]))
+ break;
+ }
+ if (i >= DSAF_MODE_MAX ||
+ i == DSAF_MODE_INVALID || i == DSAF_MODE_ENABLE) {
+ dev_err(dsaf_dev->dev,
+ "%s prs mode str fail!\n", dsaf_dev->ae_dev.name);
+ return -EINVAL;
+ }
+ dsaf_dev->dsaf_mode = (enum dsaf_mode)i;
+
+ if (dsaf_dev->dsaf_mode > DSAF_MODE_ENABLE)
+ dsaf_dev->dsaf_en = HRD_DSAF_NO_DSAF_MODE;
+ else
+ dsaf_dev->dsaf_en = HRD_DSAF_MODE;
+
+ if ((i == DSAF_MODE_ENABLE_16VM) ||
+ (i == DSAF_MODE_DISABLE_2PORT_8VM) ||
+ (i == DSAF_MODE_DISABLE_6PORT_2VM))
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_8TC_MODE;
+ else
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
+
+ dsaf_dev->sc_base = of_iomap(np, 0);
+ if (!dsaf_dev->sc_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->sds_base = of_iomap(np, 1);
+ if (!dsaf_dev->sds_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->ppe_base = of_iomap(np, 2);
+ if (!dsaf_dev->ppe_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->io_base = of_iomap(np, 3);
+ if (!dsaf_dev->io_base) {
+ dev_err(dsaf_dev->dev,
+ "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
+ ret = -ENOMEM;
+ goto unmap_base_addr;
+ }
+
+ dsaf_dev->cpld_base = of_iomap(np, 4);
+ if (!dsaf_dev->cpld_base)
+ dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
+
+ ret = of_property_read_u32(np, "desc-num", &desc_num);
+ if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
+ desc_num > HNS_DSAF_MAX_DESC_CNT) {
+ dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
+ desc_num, ret);
+ ret = ret ? ret : -EINVAL;
+ goto unmap_base_addr;
+ }
+ dsaf_dev->desc_num = desc_num;
+
+ ret = of_property_read_u32(np, "buf-size", &buf_size);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "get buf-size fail, ret=%d!\r\n", ret);
+ goto unmap_base_addr;
+ }
+ dsaf_dev->buf_size = buf_size;
+
+ dsaf_dev->buf_size_type = hns_rcb_buf_size2type(buf_size);
+ if (dsaf_dev->buf_size_type < 0) {
+ dev_err(dsaf_dev->dev,
+ "buf_size(%d) is wrong!\n", buf_size);
+ ret = -EINVAL;
+ goto unmap_base_addr;
+ }
+
+ if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64)))
+ dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
+ else
+ dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
+
+ return 0;
+
+unmap_base_addr:
+ if (dsaf_dev->io_base)
+ iounmap(dsaf_dev->io_base);
+ if (dsaf_dev->ppe_base)
+ iounmap(dsaf_dev->ppe_base);
+ if (dsaf_dev->sds_base)
+ iounmap(dsaf_dev->sds_base);
+ if (dsaf_dev->sc_base)
+ iounmap(dsaf_dev->sc_base);
+ if (dsaf_dev->cpld_base)
+ iounmap(dsaf_dev->cpld_base);
+ return ret;
+}
+
+static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
+{
+ if (dsaf_dev->io_base)
+ iounmap(dsaf_dev->io_base);
+
+ if (dsaf_dev->ppe_base)
+ iounmap(dsaf_dev->ppe_base);
+
+ if (dsaf_dev->sds_base)
+ iounmap(dsaf_dev->sds_base);
+
+ if (dsaf_dev->sc_base)
+ iounmap(dsaf_dev->sc_base);
+
+ if (dsaf_dev->cpld_base)
+ iounmap(dsaf_dev->cpld_base);
+}
+
+/**
+ * hns_dsaf_sbm_link_sram_init_en - enable sbm link sram init
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_SBM_INIT_S, 1);
+}
+
+/**
+ * hns_dsaf_reg_cnt_clr_ce - config counter clear-on-read
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @reg_cnt_clr_ce: config value
+ */
+static void
+hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_DSA_REG_CNT_CLR_CE_REG,
+ DSAF_CNT_CLR_CE_S, reg_cnt_clr_ce);
+}
+
+/**
+ * hns_dsaf_ppe_qid_cfg - config ppe qid
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @qid_cfg: qid config value
+ */
+static void
+hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_PPE_QID_CFG_0_REG + 0x0004 * i,
+ DSAF_PPE_QID_CFG_M, DSAF_PPE_QID_CFG_S,
+ qid_cfg);
+ }
+}
+
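+/* set the default (promisc) queue id base for each service port:
+ * port i gets queue id i * (max_vfn * max_q_per_vf), matching the
+ * rcb queue layout
+ */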
+static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 i;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
+ HNS_DSAF_COMM_SERVICE_NW_IDX,
+ &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
+ for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_MIX_DEF_QID_0_REG + 0x0004 * i,
+ 0xff, 0, q_id);
+ q_id += q_num_per_port;
+ }
+}
+
+/**
+ * hns_dsaf_sw_port_type_cfg - cfg sw port type
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @port_type: sw port type
+ */
+static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_sw_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_SW_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_SW_PORT_TYPE_M, DSAF_SW_PORT_TYPE_S,
+ port_type);
+ }
+}
+
+/**
+ * hns_dsaf_stp_port_type_cfg - cfg stp port type
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @port_type: stp port type
+ */
+static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_stp_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_STP_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_STP_PORT_TYPE_M, DSAF_STP_PORT_TYPE_S,
+ port_type);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_cfg - config sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_cfg;
+ u32 i;
+
+ for (i = 0; i < DSAF_SBM_NUM; i++) {
+ o_sbm_cfg = dsaf_read_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_SHCUT_EN_S, 0);
+ dsaf_write_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i, o_sbm_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_cfg_mib_en - enable sbm mib counters
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
+{
+ u32 sbm_cfg_mib_en;
+ u32 i;
+ u32 reg;
+ u32 read_cnt;
+
+ for (i = 0; i < DSAF_SBM_NUM; i++) {
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1);
+ }
+
+ /* wait for all sbm enables to finish */
+ for (i = 0; i < DSAF_SBM_NUM; i++) {
+ read_cnt = 0;
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ do {
+ udelay(1);
+ sbm_cfg_mib_en = dsaf_get_dev_bit(
+ dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S);
+ read_cnt++;
+ } while (sbm_cfg_mib_en == 0 &&
+ read_cnt < DSAF_CFG_READ_CNT);
+
+ if (sbm_cfg_mib_en == 0) {
+ dev_err(dsaf_dev->dev,
+ "sbm_cfg_mib_en fail,%s,sbm_num=%d\n",
+ dsaf_dev->ae_dev.name, i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_sbm_bp_wl_cfg - config sbm backpressure waterlines
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_bp_cfg0;
+ u32 o_sbm_bp_cfg1;
+ u32 o_sbm_bp_cfg2;
+ u32 o_sbm_bp_cfg3;
+ u32 reg;
+ u32 i;
+
+ /* XGE */
+ for (i = 0; i < DSAF_XGE_NUM; i++) {
+ reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg0 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512);
+ dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg0);
+
+ reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg1 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg1);
+
+ reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 104);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+
+ reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+
+ /* buffer thresholds for the no-pfc mode */
+ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128);
+ dsaf_set_field(o_sbm_bp_cfg3,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+ }
+
+ /* PPE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 10);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+ }
+
+ /* RoCEE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+ }
+}
+
+/**
+ * hns_dsaf_voq_bp_all_thrd_cfg - config voq backpressure thresholds
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 voq_bp_all_thrd;
+ u32 i;
+
+ for (i = 0; i < DSAF_VOQ_NUM; i++) {
+ voq_bp_all_thrd = dsaf_read_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i);
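+ /* the first DSAF_XGE_NUM voqs get higher up/down thresholds
+ * than the remaining voqs
+ */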
+ if (i < DSAF_XGE_NUM) {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 930);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 950);
+ } else {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 220);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 230);
+ }
+ dsaf_write_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i,
+ voq_bp_all_thrd);
+ }
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_cfg - write tcam key data
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @ptbl_tcam_data: tcam data to write
+ */
+static void hns_dsaf_tbl_tcam_data_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_LOW_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_low);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_HIGH_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_cfg - stage a tcam mcast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mcast: mcast entry to write
+ */
+static void hns_dsaf_tbl_tcam_mcast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_mcast_cfg *mcast)
+{
+ u32 mcast_cfg4;
+
+ mcast_cfg4 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S,
+ mcast->tbl_mcast_item_vld);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_OLD_EN_S,
+ mcast->tbl_mcast_old_en);
+ dsaf_set_field(mcast_cfg4, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S,
+ mcast->tbl_mcast_port_msk[4]);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, mcast_cfg4);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG,
+ mcast->tbl_mcast_port_msk[3]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG,
+ mcast->tbl_mcast_port_msk[2]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG,
+ mcast->tbl_mcast_port_msk[1]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG,
+ mcast->tbl_mcast_port_msk[0]);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_ucast_cfg - stage a tcam ucast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @tbl_tcam_ucast: ucast entry to write
+ */
+static void hns_dsaf_tbl_tcam_ucast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_ucast_cfg *tbl_tcam_ucast)
+{
+ u32 ucast_cfg1;
+
+ ucast_cfg1 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S,
+ tbl_tcam_ucast->tbl_ucast_mac_discard);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_ITEM_VLD_S,
+ tbl_tcam_ucast->tbl_ucast_item_vld);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OLD_EN_S,
+ tbl_tcam_ucast->tbl_ucast_old_en);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_DVC_S,
+ tbl_tcam_ucast->tbl_ucast_dvc);
+ dsaf_set_field(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S,
+ tbl_tcam_ucast->tbl_ucast_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG, ucast_cfg1);
+}
+
+/**
+ * hns_dsaf_tbl_line_cfg - stage a line tbl entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @tbl_lin: line entry to write
+ */
+static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_line_cfg *tbl_lin)
+{
+ u32 tbl_line;
+
+ tbl_line = dsaf_read_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_MAC_DISCARD_S,
+ tbl_lin->tbl_line_mac_discard);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_DVC_S,
+ tbl_lin->tbl_line_dvc);
+ dsaf_set_field(tbl_line, DSAF_TBL_LINE_CFG_OUT_PORT_M,
+ DSAF_TBL_LINE_CFG_OUT_PORT_S,
+ tbl_lin->tbl_line_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_pul - pulse the mcast tbl write
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
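+ /* pulse the mcast valid bit (write 1, then 0) to trigger the
+ * table update
+ */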
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_line_pul - pulse the line tbl write
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 tbl_pul;
+
+ tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_mcast_pul - pulse the tcam data and mcast write
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_data_mcast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_ucast_pul - pulse the tcam data and ucast write
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_data_ucast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
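+/**
+ * hns_dsaf_set_promisc_mode - enable/disable dsaf promisc (mix) mode
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @en: nonzero to enable
+ */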
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+}
+
+/**
+ * hns_dsaf_tbl_stat_en - enable tbl lookup counters
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_ctrl;
+
+ o_tbl_ctrl = dsaf_read_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_UC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_MC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_BC_LKUP_NUM_EN_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG, o_tbl_ctrl);
+}
+
+/**
+ * hns_dsaf_rocee_bp_en - enable rocee back pressure
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+ DSAF_FC_XGE_TX_PAUSE_S, 1);
+}
+
+/* set mask for dsaf exception irqs */
+static void hns_dsaf_int_xge_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 mask_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_MSK_0_REG + 0x4 * chnn_num, mask_set);
+}
+
+static void hns_dsaf_int_ppe_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_MSK_0_REG + 0x4 * chnn_num, msk_set);
+}
+
+static void hns_dsaf_int_rocee_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_MSK_0_REG + 0x4 * chnn, msk_set);
+}
+
+static void
+hns_dsaf_int_tbl_msk_set(struct dsaf_device *dsaf_dev, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_MSK_0_REG, msk_set);
+}
+
+/* clear dsaf exception irq sources */
+static void hns_dsaf_int_xge_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_SRC_0_REG + 0x4 * chnn_num, int_src);
+}
+
+static void hns_dsaf_int_ppe_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_rocee_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
+ u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_SRC_0_REG, int_src);
+}
+
+/**
+ * hns_dsaf_single_line_tbl_cfg - write one line tbl entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: line tbl entry index
+ * @ptbl_line: entry to write
+ */
+static void hns_dsaf_single_line_tbl_cfg(
+ struct dsaf_device *dsaf_dev,
+ u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
+
+ /*Write Line*/
+ hns_dsaf_tbl_line_cfg(dsaf_dev, ptbl_line);
+
+ /*Write Plus*/
+ hns_dsaf_tbl_line_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg - write one tcam ucast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: tcam key data
+ * @ptbl_tcam_ucast: ucast entry to write
+ */
+static void hns_dsaf_tcam_uc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Ucast*/
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
+ /*Write Plus*/
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg - write one tcam mcast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: tcam key data
+ * @ptbl_tcam_mcast: mcast entry to write
+ */
+static void hns_dsaf_tcam_mc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Mcast*/
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
+ /*Write Plus*/
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_invld - invalidate one tcam mcast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ */
+static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
+{
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*write tcam mcast*/
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, 0);
+
+ /*Write Plus*/
+ hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_get - read one tcam ucast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: tcam key data read back
+ * @ptbl_tcam_ucast: ucast entry read back
+ */
+static void hns_dsaf_tcam_uc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ u32 tcam_read_data0;
+ u32 tcam_read_data4;
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*read tcam item puls*/
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+ /*read tcam mcast*/
+ tcam_read_data0 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ tcam_read_data4 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+
+ ptbl_tcam_ucast->tbl_ucast_item_vld
+ = dsaf_get_bit(tcam_read_data4,
+ DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_ucast->tbl_ucast_old_en
+ = dsaf_get_bit(tcam_read_data4, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_ucast->tbl_ucast_mac_discard
+ = dsaf_get_bit(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S);
+ ptbl_tcam_ucast->tbl_ucast_out_port
+ = dsaf_get_field(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
+ ptbl_tcam_ucast->tbl_ucast_dvc
+ = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+}
+
+/**
+ * hns_dsaf_tcam_mc_get - read one tcam mcast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry index
+ * @ptbl_tcam_data: tcam key data read back
+ * @ptbl_tcam_mcast: mcast entry read back
+ */
+static void hns_dsaf_tcam_mc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ u32 data_tmp;
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*read tcam item puls*/
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+ /*read tcam mcast*/
+ ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[1] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[2] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[3] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+
+ data_tmp = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_item_vld =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_mcast->tbl_mcast_old_en =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
+ dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S);
+}
+
+/**
+ * hns_dsaf_tbl_line_init - init the line tbl
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ /* by default set all line mac table entries to discard */
+ struct dsaf_tbl_line_cfg tbl_line[] = {{1, 0, 0} };
+
+ for (i = 0; i < DSAF_LINE_SUM; i++)
+ hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_init - init the tcam tbl
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ struct dsaf_tbl_tcam_data tcam_data[] = {{0, 0} };
+ struct dsaf_tbl_tcam_ucast_cfg tcam_ucast[] = {{0, 0, 0, 0, 0} };
+
+ /*tcam tbl*/
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast);
+}
+
+/**
+ * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac port id
+ * @en: nonzero enables pfc for all priorities (0xff), zero disables
+ */
+static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
+ int mac_id, int en)
+{
+ if (!en)
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+ else
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+}
+
+/**
+ * hns_dsaf_comm_init - init dsaf common registers
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ u32 o_dsaf_cfg;
+
+ o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_TC_MODE_S, dsaf_dev->dsaf_tc_mode);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_CRC_EN_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_MIX_MODE_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_LOCA_ADDR_EN_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_CFG_0_REG, o_dsaf_cfg);
+
+ hns_dsaf_reg_cnt_clr_ce(dsaf_dev, 1);
+ hns_dsaf_stp_port_type_cfg(dsaf_dev, DSAF_STP_PORT_TYPE_FORWARD);
+
+ /* set 22 queues per tx ppe engine; only used in switch mode */
+ hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
+
+ /* set promisc def queue id */
+ hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+
+ /* in non-switch mode, set all ports to access mode */
+ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
+
+ /* set dsaf pfc to 0 for parsing rx pause */
+ for (i = 0; i < DSAF_COMM_CHN; i++)
+ hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+
+ /* mask and clear exception irqs */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ hns_dsaf_int_xge_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_src_clr(dsaf_dev, i, 0xfffffffful);
+
+ hns_dsaf_int_xge_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_msk_set(dsaf_dev, i, 0xfffffffful);
+ }
+ hns_dsaf_int_tbl_src_clr(dsaf_dev, 0xfffffffful);
+ hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful);
+}
+
+/**
+ * hns_dsaf_inode_init - init the xbar inodes
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
+{
+ u32 reg;
+ u32 tc_cfg;
+ u32 i;
+
+ if (dsaf_dev->dsaf_tc_mode == HRD_DSAF_4TC_MODE)
+ tc_cfg = HNS_DSAF_I4TC_CFG;
+ else
+ tc_cfg = HNS_DSAF_I8TC_CFG;
+
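+ /* map each inode to an input port (wrapping over the xge ports)
+ * and set its priority-to-tc mapping
+ */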
+ for (i = 0; i < DSAF_INODE_NUM; i++) {
+ reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+ dsaf_set_dev_field(dsaf_dev, reg, DSAF_INODE_IN_PORT_NUM_M,
+ DSAF_INODE_IN_PORT_NUM_S, i % DSAF_XGE_NUM);
+
+ reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i;
+ dsaf_write_dev(dsaf_dev, reg, tc_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_init - init the sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 flag;
+ u32 cnt = 0;
+ int ret;
+
+ hns_dsaf_sbm_bp_wl_cfg(dsaf_dev);
+
+ /* enable sbm channel, disable sbm channel shortcut function */
+ hns_dsaf_sbm_cfg(dsaf_dev);
+
+ /* enable sbm mib */
+ ret = hns_dsaf_sbm_cfg_mib_en(dsaf_dev);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_cfg_mib_en fail,%s, ret=%d\n",
+ dsaf_dev->ae_dev.name, ret);
+ return ret;
+ }
+
+ /* enable sbm initial link sram */
+ hns_dsaf_sbm_link_sram_init_en(dsaf_dev);
+
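+ /* poll the sram init-over flag, ~200us per read, at most
+ * DSAF_CFG_READ_CNT times
+ */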
+ do {
+ usleep_range(200, 210);
+ flag = dsaf_read_dev(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG);
+ cnt++;
+ } while (flag != DSAF_SRAM_INIT_FINISH_FLAG && cnt < DSAF_CFG_READ_CNT);
+
+ if (flag != DSAF_SRAM_INIT_FINISH_FLAG) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n",
+ dsaf_dev->ae_dev.name, flag, cnt);
+ return -ENODEV;
+ }
+
+ hns_dsaf_rocee_bp_en(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_tbl_init - init the tcam and line tables
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_tbl_stat_en(dsaf_dev);
+
+ hns_dsaf_tbl_tcam_init(dsaf_dev);
+ hns_dsaf_tbl_line_init(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_voq_init - init the voq thresholds
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_init_hw - init dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
+{
+ int ret;
+
+ dev_dbg(dsaf_dev->dev,
+ "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
+
+ hns_dsaf_rst(dsaf_dev, 0);
+ mdelay(10);
+ hns_dsaf_rst(dsaf_dev, 1);
+
+ hns_dsaf_comm_init(dsaf_dev);
+
+ /*init XBAR_INODE*/
+ hns_dsaf_inode_init(dsaf_dev);
+
+ /*init SBM*/
+ ret = hns_dsaf_sbm_init(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /*init TBL*/
+ hns_dsaf_tbl_init(dsaf_dev);
+
+ /*init VOQ*/
+ hns_dsaf_voq_init(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_remove_hw - uninit dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
+{
+ /*reset*/
+ hns_dsaf_rst(dsaf_dev, 0);
+}
+
+/**
+ * hns_dsaf_init - init dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ u32 i;
+ int ret;
+
+ ret = hns_dsaf_init_hw(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /* malloc mem for tcam mac key(vlan+mac) */
+ priv->soft_mac_tbl = vzalloc(sizeof(*priv->soft_mac_tbl)
+ * DSAF_TCAM_SUM);
+ if (!priv->soft_mac_tbl) {
+ ret = -ENOMEM;
+ goto remove_hw;
+ }
+
+ /* mark all entries invalid */
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ (priv->soft_mac_tbl + i)->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+
+remove_hw:
+ hns_dsaf_remove_hw(dsaf_dev);
+ return ret;
+}
+
+/**
+ * hns_dsaf_free - free dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+
+ hns_dsaf_remove_hw(dsaf_dev);
+
+ /* free all mac mem */
+ vfree(priv->soft_mac_tbl);
+ priv->soft_mac_tbl = NULL;
+}
+
+/**
+ * hns_dsaf_find_soft_mac_entry - find dsa fabric soft entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: mac entry struct pointer
+ */
+static u16 hns_dsaf_find_soft_mac_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < DSAF_TCAM_SUM; i++) {
+ /* valid entry with matching key? */
+ if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
+ (soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
+ (soft_mac_entry->tcam_key.low.val == mac_key->low.val))
+ /* return find result --soft index */
+ return soft_mac_entry->index;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry - search dsa fabric soft empty-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < DSAF_TCAM_SUM; i++) {
+ /* an invalid entry is a free slot */
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ /* return find result --soft index */
+ return i;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_set_mac_key - set mac key
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+static void hns_dsaf_set_mac_key(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key, u16 vlan_id, u8 in_port_num,
+ u8 *addr)
+{
+ u8 port;
+
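+ /* the tcam key packs the 6-byte mac with the vlan id and input port */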
+ if (dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE)
+ /*DSAF mode : in port id fixed 0*/
+ port = 0;
+ else
+ /*non-dsaf mode*/
+ port = in_port_num;
+
+ mac_key->high.bits.mac_0 = addr[0];
+ mac_key->high.bits.mac_1 = addr[1];
+ mac_key->high.bits.mac_2 = addr[2];
+ mac_key->high.bits.mac_3 = addr[3];
+ mac_key->low.bits.mac_4 = addr[4];
+ mac_key->low.bits.mac_5 = addr[5];
+ mac_key->low.bits.vlan = vlan_id;
+ mac_key->low.bits.port = port;
+}
+
+/**
+ * hns_dsaf_set_mac_uc_entry - set mac uc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: uc-mac entry
+ */
+int hns_dsaf_set_mac_uc_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+ /* mac addr check */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr) ||
+ MAC_IS_MULTICAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr[0],
+ mac_entry->addr[1], mac_entry->addr[2],
+ mac_entry->addr[3], mac_entry->addr[4],
+ mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /* does the entry already exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* not found, find an empty entry */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry, return error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* config hardware entry */
+ mac_data.tbl_ucast_item_vld = 1;
+ mac_data.tbl_ucast_mac_discard = 0;
+ mac_data.tbl_ucast_old_en = 0;
+ /* default config dvc to 0 */
+ mac_data.tbl_ucast_dvc = 0;
+ mac_data.tbl_ucast_out_port = mac_entry->port_num;
+ hns_dsaf_tcam_uc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+ /* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_set_mac_mc_entry - set mac mc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_set_mac_mc_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+
+ /* mac addr check */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr[0],
+ mac_entry->addr[1], mac_entry->addr[2],
+ mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
+ mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /* does the entry already exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* not found, find an empty entry */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry, error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+
+ /* config hardware entry */
+ memset(mac_data.tbl_mcast_port_msk,
+ 0, sizeof(mac_data.tbl_mcast_port_msk));
+ } else {
+ /* config hardware entry */
+ hns_dsaf_tcam_mc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+ }
+ mac_data.tbl_mcast_old_en = 0;
+ mac_data.tbl_mcast_item_vld = 1;
+ dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
+ 0x3F, 0, mac_entry->port_mask[0]);
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ hns_dsaf_tcam_mc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+ /* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_add_mac_mc_port - add mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+ int mskid;
+
+ /* check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(
+ dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg));
+
+ /*check exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* not found, find an empty entry */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry, error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val);
+ return -EINVAL;
+ }
+ } else {
+ /* entry exists, add this port to it */
+ hns_dsaf_tcam_mc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+ }
+ /* config hardware entry */
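+ /* map the dest port to its bit in the mcast port mask: service
+ * ports keep their own index, inner ports follow after them
+ */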
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1);
+ mac_data.tbl_mcast_old_en = 0;
+ mac_data.tbl_mcast_item_vld = 1;
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ hns_dsaf_tcam_mc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+ /*config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_entry - del mac entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
+ dev_err(dsaf_dev->dev,
+ "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, addr);
+
+ /* does the entry exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /*not exist, error */
+ dev_err(dsaf_dev->dev,
+ "del_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* do the delete */
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+ /* del soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_mc_port - del mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ u16 vlan_id;
+ u8 in_port_num;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+ int mskid;
+ const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
+
+ if (!mac_entry) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_del_mac_mc_port mac_entry is NULL\n");
+ return -EINVAL;
+ }
+
+ /*get key info*/
+ vlan_id = mac_entry->in_vlan_id;
+ in_port_num = mac_entry->in_port_num;
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num,
+ mac_entry->addr);
+
+ /* does the entry exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /*find none */
+ dev_err(dsaf_dev->dev,
+ "find_soft_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_mc_port, %s key(%#x:%#x) index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /*read entry*/
+ hns_dsaf_tcam_mc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+
+ /*del the port*/
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 0);
+
+ /* if no port remains, delete the whole entry */
+ if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+ sizeof(mac_data.tbl_mcast_port_msk))) {
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+ /* del soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+ } else { /* ports remain, just update the entry */
+ hns_dsaf_tcam_mc_cfg(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_uc_entry - get mac uc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+
+ /* check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /*check exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /*find none, error */
+ dev_err(dsaf_dev->dev,
+ "get_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /*read entry*/
+ hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+ mac_entry->port_num = mac_data.tbl_ucast_out_port;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_mc_entry - get mac mc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev,
+ "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac_entry->addr[0], mac_entry->addr[1],
+ mac_entry->addr[2], mac_entry->addr[3],
+ mac_entry->addr[4], mac_entry->addr[5]);
+ return -EINVAL;
+ }
+
+ /*config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /*check exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* find none, error */
+ dev_err(dsaf_dev->dev,
+ "get_mac_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "get_mac_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /*read entry */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+ mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @entry_index: tab entry index
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_entry_by_index(
+ struct dsaf_device *dsaf_dev,
+ u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
+ char mac_addr[MAC_NUM_OCTETS_PER_ADDR] = {0};
+
+ if (entry_index >= DSAF_TCAM_SUM) {
+ /* index out of range, error */
+ dev_err(dsaf_dev->dev, "get_entry_by_index failed, %s\n",
+ dsaf_dev->ae_dev.name);
+ return -EINVAL;
+ }
+
+ /* read as an mc entry first */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+ mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+
+ /* get mac addr */
+ mac_addr[0] = mac_key.high.bits.mac_0;
+ mac_addr[1] = mac_key.high.bits.mac_1;
+ mac_addr[2] = mac_key.high.bits.mac_2;
+ mac_addr[3] = mac_key.high.bits.mac_3;
+ mac_addr[4] = mac_key.low.bits.mac_4;
+ mac_addr[5] = mac_key.low.bits.mac_5;
+ /* is it mc or uc? */
+ if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
+ MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
+ /* mc: nothing more to do */
+ } else {
+ /* not mc, so uc: read the ucast entry instead */
+ hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key,
+ &mac_uc_data);
+ mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
+ }
+
+ return 0;
+}
+
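+/**
+ * hns_dsaf_alloc_dev - alloc dsaf device with trailing priv data
+ * @dev: struct device pointer
+ * @sizeof_priv: size of the priv data appended to the device struct
+ */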
+static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct dsaf_device *dsaf_dev;
+
+ dsaf_dev = devm_kzalloc(dev,
+ sizeof(*dsaf_dev) + sizeof_priv, GFP_KERNEL);
+ if (unlikely(!dsaf_dev)) {
+ dsaf_dev = ERR_PTR(-ENOMEM);
+ } else {
+ dsaf_dev->dev = dev;
+ dev_set_drvdata(dev, dsaf_dev);
+ }
+
+ return dsaf_dev;
+}
+
+/**
+ * hns_dsaf_free_dev - free dev mem
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
+{
+ dev_set_drvdata(dsaf_dev->dev, NULL);
+}
+
+/**
+ * hns_dsaf_pfc_unit_cnt - set pfc unit count
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac port id
+ * @rate: port rate mode
+ */
+static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate)
+{
+ u32 unit_cnt;
+
+ switch (rate) {
+ case DSAF_PORT_RATE_10000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ break;
+ case DSAF_PORT_RATE_1000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ case DSAF_PORT_RATE_2500:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ default:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ }
+
+ dsaf_set_dev_field(dsaf_dev,
+ (DSAF_PFC_UNIT_CNT_0_REG + 0x4 * (u64)mac_id),
+ DSAF_PFC_UNINT_CNT_M, DSAF_PFC_UNINT_CNT_S,
+ unit_cnt);
+}
+
+/**
+ * hns_dsaf_port_work_rate_cfg - config port work rate (xge or ge)
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac port id
+ * @rate_mode: port rate mode
+ */
+void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
+{
+ u32 port_work_mode;
+
+ port_work_mode = dsaf_read_dev(
+ dsaf_dev, DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id);
+
+ if (rate_mode == DSAF_PORT_RATE_10000)
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 1);
+ else
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 0);
+
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id,
+ port_work_mode);
+
+ hns_dsaf_pfc_unit_cnt(dsaf_dev, mac_id, rate_mode);
+}
+
+/**
+ * hns_dsaf_fix_mac_mode - dsaf modify mac mode
+ * @mac_cb: mac control block
+ */
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
+{
+ enum dsaf_port_rate_mode mode;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ int mac_id = mac_cb->mac_id;
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return;
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mode = DSAF_PORT_RATE_10000;
+ else
+ mode = DSAF_PORT_RATE_1000;
+
+ hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
+}
+
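+/**
+ * hns_dsaf_update_stats - accumulate hw stats for one node
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @node_num: node index
+ */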
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
+{
+ struct dsaf_hw_stats *hw_stats
+ = &dsaf_dev->hw_stats[node_num];
+
+ hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->man_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->crc_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->bp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rslt_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->local_addr_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+ hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+
+ hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
+}
+
+/**
+ * hns_dsaf_get_regs - dump dsaf regs
+ * @ddev: dsaf device
+ * @port: port index
+ * @data: buffer for reg values
+ */
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
+{
+ u32 i = 0;
+ u32 j;
+ u32 *p = data;
+
+ /* dsaf common registers */
+ p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
+ p[1] = dsaf_read_dev(ddev, DSAF_CFG_0_REG);
+ p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG);
+ p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG);
+ p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG);
+ p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG);
+ p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG);
+ p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG);
+ p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG);
+
+ p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4);
+ p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4);
+ p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4);
+ p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4);
+ p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4);
+ p[19] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4);
+ p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4);
+ p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4);
+ p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[24 + i] = dsaf_read_dev(ddev,
+ DSAF_SW_PORT_TYPE_0_REG + i * 4);
+
+ p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[33 + i] = dsaf_read_dev(ddev,
+ DSAF_PORT_DEF_VLAN_0_REG + i * 4);
+
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++)
+ p[41 + i] = dsaf_read_dev(ddev,
+ DSAF_VM_DEF_VLAN_0_REG + i * 4);
+
+ /* dsaf inode registers */
+ p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG);
+
+ p[171] = dsaf_read_dev(ddev,
+ DSAF_INODE_ECC_ERR_ADDR_0_REG + port * 0x80);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[172 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_PORT_NUM_0_REG + j * 0x80);
+ p[175 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PRI_TC_CFG_0_REG + j * 0x80);
+ p[178 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_STATUS_0_REG + j * 0x80);
+ p[181 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + j * 0x80);
+ p[184 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + j * 0x80);
+ p[187 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
+ p[190 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
+ p[193 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+ p[196 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
+ p[199 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + j * 0x80);
+ p[202 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + j * 0x80);
+ p[205 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + j * 0x80);
+ p[208 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + j * 0x80);
+ p[211 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + j * 0x80);
+ p[214 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VOQ_OVER_NUM_0_REG + j * 0x80);
+ p[217 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_SAVE_STATUS_0_REG + j * 4);
+ p[220 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
+ p[223 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
+ p[224 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
+ }
+
+ p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[228 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
+ }
+
+ p[231] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+
+ /* dsaf sbm registers */
+ for (i = 0; i < DSAF_SBM_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[232 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_CFG_REG_0_REG + j * 0x80);
+ p[235 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
+ p[238 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
+ p[241 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
+ p[244 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
+ p[245 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
+ p[248 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
+ p[251 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
+ p[254 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
+ p[257 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
+ p[260 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INER_ST_0_REG + j * 0x80);
+ p[263 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
+ p[266 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
+ p[269 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
+ p[272 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
+ p[275 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
+ p[278 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
+ p[281 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
+ p[284 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
+ p[287 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
+ p[290 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
+ p[293 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
+ p[296 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
+ p[299 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
+ p[302 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
+ p[305 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
+ p[308 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
+ }
+
+ /* dsaf onode registers */
+ for (i = 0; i < DSAF_XOD_NUM; i++) {
+		p[311 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
+		p[319 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
+		p[327 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
+		p[335 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
+		p[343 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
+		p[351 + i] = dsaf_read_dev(ddev,
+				DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
+ }
+
+ p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+ p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+ p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+
+ for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[362 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_L_0_REG + j * 0x90);
+ p[365 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_H_0_REG + j * 0x90);
+ p[368 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
+ p[371 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
+ p[374 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
+ p[377 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
+ p[380 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
+ p[383 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
+ p[386 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
+ p[389 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
+ }
+
+ p[392] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[393] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[394] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
+ p[395] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
+ p[396] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+ p[397] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+ p[398] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+ p[399] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+ p[400] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[401] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[402] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[403] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[404] = dsaf_read_dev(ddev,
+ DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
+
+ /* dsaf voq registers */
+ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
+ j = (i * DSAF_COMM_CHN + port) * 0x90;
+ p[405 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
+ p[408 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
+ p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+ p[414 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
+ p[417 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
+ p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+ p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+ p[426 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
+ p[429 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
+ p[432 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
+ p[435 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
+ p[438 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_BP_ALL_THRD_0_REG + j);
+ }
+
+ /* dsaf tbl registers */
+ p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+ p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+ p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+ p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+ p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+ p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+ p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+ p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+ p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+ p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+ p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+ p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+ p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+ p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+ p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ j = i * 0x8;
+ p[464 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
+ p[465 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
+ }
+
+ p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+ p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+ p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+ p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+ p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+ p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+ p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+ p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+ p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+ p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+ p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+ p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+
+ /* dsaf other registers */
+ p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+ p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+ p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+ p[495] = dsaf_read_dev(ddev,
+ DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
+ p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+ p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+
+ /* mark end of dsaf regs */
+ for (i = 498; i < 504; i++)
+ p[i] = 0xdddddddd;
+}
+
+static char *hns_dsaf_get_node_stats_strings(char *data, int node)
+{
+ char *buff = data;
+
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
+ buff = buff + ETH_GSTRING_LEN;
+
+ return buff;
+}
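+
+/* layout note: each name occupies a full ETH_GSTRING_LEN slot and this
+ * helper emits 14 names per node, so the two node blocks together
+ * account for the DSAF_STATIC_NUM (28) strings reported by
+ * hns_dsaf_get_sset_count()
+ */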
+
+static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
+ int node_num)
+{
+ u64 *p = data;
+ struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+
+ p[0] = hw_stats->pad_drop;
+ p[1] = hw_stats->man_pkts;
+ p[2] = hw_stats->rx_pkts;
+ p[3] = hw_stats->rx_pkt_id;
+ p[4] = hw_stats->rx_pause_frame;
+ p[5] = hw_stats->release_buf_num;
+ p[6] = hw_stats->sbm_drop;
+ p[7] = hw_stats->crc_false;
+ p[8] = hw_stats->bp_drop;
+ p[9] = hw_stats->rslt_drop;
+ p[10] = hw_stats->local_addr_false;
+ p[11] = hw_stats->vlan_drop;
+ p[12] = hw_stats->stp_drop;
+ p[13] = hw_stats->tx_pkts;
+
+ return &p[14];
+}
+
+/**
+ *hns_dsaf_get_stats - get dsaf statistics
+ *@ddev: dsaf device
+ *@data: buffer for the statistic values
+ *@port: port num
+ */
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
+{
+ u64 *p = data;
+ int node_num = port;
+
+ /* for ge/xge node info */
+ p = hns_dsaf_get_node_stats(ddev, p, node_num);
+
+ /* for ppe node info */
+ node_num = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats(ddev, p, node_num);
+}
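+
+/* a minimal sketch of a hypothetical caller, assuming a "data" buffer
+ * of DSAF_STATIC_NUM u64s:
+ *
+ *	u64 stats[DSAF_STATIC_NUM];
+ *
+ *	hns_dsaf_get_stats(ddev, stats, port);
+ *	stats[0..13] then hold the ge/xge node counters for "port" and
+ *	stats[14..27] the matching ppe node counters
+ */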
+
+/**
+ *hns_dsaf_get_sset_count - get dsaf string set count
+ *@stringset: type of values in data
+ *return dsaf string name count
+ */
+int hns_dsaf_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return DSAF_STATIC_NUM;
+
+ return 0;
+}
+
+/**
+ *hns_dsaf_get_strings - get dsaf string set
+ *@stringset: string set index
+ *@data: buffer for the string names
+ *@port: port index
+ */
+void hns_dsaf_get_strings(int stringset, u8 *data, int port)
+{
+ char *buff = (char *)data;
+ int node = port;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ /* for ge/xge node info */
+ buff = hns_dsaf_get_node_stats_strings(buff, node);
+
+ /* for ppe node info */
+ node = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats_strings(buff, node);
+}
+
+/**
+ *hns_dsaf_get_regs_count - get dsaf regs count
+ *return dsaf regs count
+ */
+int hns_dsaf_get_regs_count(void)
+{
+ return DSAF_DUMP_REGS_NUM;
+}
+
+/**
+ * hns_dsaf_probe - probe dsaf device
+ * @pdev: dsaf platform device
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_probe(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev;
+ int ret;
+
+ dsaf_dev = hns_dsaf_alloc_dev(&pdev->dev, sizeof(struct dsaf_drv_priv));
+ if (IS_ERR(dsaf_dev)) {
+ ret = PTR_ERR(dsaf_dev);
+ dev_err(&pdev->dev,
+ "dsaf_probe dsaf_alloc_dev failed, ret = %#x!\n", ret);
+ return ret;
+ }
+
+ ret = hns_dsaf_get_cfg(dsaf_dev);
+ if (ret)
+ goto free_dev;
+
+ ret = hns_dsaf_init(dsaf_dev);
+ if (ret)
+ goto free_cfg;
+
+ ret = hns_mac_init(dsaf_dev);
+ if (ret)
+ goto uninit_dsaf;
+
+ ret = hns_ppe_init(dsaf_dev);
+ if (ret)
+ goto uninit_mac;
+
+ ret = hns_dsaf_ae_init(dsaf_dev);
+ if (ret)
+ goto uninit_ppe;
+
+ return 0;
+
+uninit_ppe:
+ hns_ppe_uninit(dsaf_dev);
+
+uninit_mac:
+ hns_mac_uninit(dsaf_dev);
+
+uninit_dsaf:
+ hns_dsaf_free(dsaf_dev);
+
+free_cfg:
+ hns_dsaf_free_cfg(dsaf_dev);
+
+free_dev:
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return ret;
+}
+
+/**
+ * hns_dsaf_remove - remove dsaf device
+ * @pdev: dsaf platform device
+ */
+static int hns_dsaf_remove(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
+
+ hns_dsaf_ae_uninit(dsaf_dev);
+
+ hns_ppe_uninit(dsaf_dev);
+
+ hns_mac_uninit(dsaf_dev);
+
+ hns_dsaf_free(dsaf_dev);
+
+ hns_dsaf_free_cfg(dsaf_dev);
+
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return 0;
+}
+
+static const struct of_device_id g_dsaf_match[] = {
+ {.compatible = "hisilicon,hns-dsaf-v1"},
+ {.compatible = "hisilicon,hns-dsaf-v2"},
+ {}
+};
+
+static struct platform_driver g_dsaf_driver = {
+ .probe = hns_dsaf_probe,
+ .remove = hns_dsaf_remove,
+ .driver = {
+ .name = DSAF_DRV_NAME,
+ .of_match_table = g_dsaf_match,
+ },
+};
+
+module_platform_driver(g_dsaf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HNS DSAF driver");
+MODULE_VERSION(DSAF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
new file mode 100644
index 000000000000..b2b93484995c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_DSAF_MAIN_H
+#define __HNS_DSAF_MAIN_H
+#include "hnae.h"
+
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_mac.h"
+
+struct hns_mac_cb;
+
+#define DSAF_DRV_NAME "hns_dsaf"
+#define DSAF_MOD_VERSION "v1.0"
+
+#define ENABLE (0x1)
+#define DISABLE (0x0)
+
+#define HNS_DSAF_DEBUG_NW_REG_OFFSET (0x100000)
+
+#define DSAF_BASE_INNER_PORT_NUM (127) /* mac tbl qid*/
+
+#define DSAF_MAX_CHIP_NUM (2) /*max 2 chips */
+
+#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE (22)
+
+#define HNS_DSAF_MAX_DESC_CNT (1024)
+#define HNS_DSAF_MIN_DESC_CNT (16)
+
+#define DSAF_INVALID_ENTRY_IDX (0xffff)
+
+#define DSAF_CFG_READ_CNT (30)
+#define DSAF_SRAM_INIT_FINISH_FLAG (0xff)
+
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+#define DSAF_DUMP_REGS_NUM 504
+#define DSAF_STATIC_NUM 28
+
+#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+
+enum hal_dsaf_mode {
+ HRD_DSAF_NO_DSAF_MODE = 0x0,
+ HRD_DSAF_MODE = 0x1,
+};
+
+enum hal_dsaf_tc_mode {
+ HRD_DSAF_4TC_MODE = 0X0,
+ HRD_DSAF_8TC_MODE = 0X1,
+};
+
+struct dsaf_vm_def_vlan {
+ u32 vm_def_vlan_id;
+ u32 vm_def_vlan_cfi;
+ u32 vm_def_vlan_pri;
+};
+
+struct dsaf_tbl_tcam_data {
+ u32 tbl_tcam_data_high;
+ u32 tbl_tcam_data_low;
+};
+
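+/* words needed to hold one valid bit per queue plus one per service
+ * network port: "(n - 1) / 32 + 1" is the usual ceil(n / 32) idiom
+ */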
+#define DSAF_PORT_MSK_NUM \
+ ((DSAF_TOTAL_QUEUE_NUM + DSAF_SERVICE_NW_NUM - 1) / 32 + 1)
+struct dsaf_tbl_tcam_mcast_cfg {
+ u8 tbl_mcast_old_en;
+ u8 tbl_mcast_item_vld;
+ u32 tbl_mcast_port_msk[DSAF_PORT_MSK_NUM];
+};
+
+struct dsaf_tbl_tcam_ucast_cfg {
+ u32 tbl_ucast_old_en;
+ u32 tbl_ucast_item_vld;
+ u32 tbl_ucast_mac_discard;
+ u32 tbl_ucast_dvc;
+ u32 tbl_ucast_out_port;
+};
+
+struct dsaf_tbl_line_cfg {
+ u32 tbl_line_mac_discard;
+ u32 tbl_line_dvc;
+ u32 tbl_line_out_port;
+};
+
+enum dsaf_port_rate_mode {
+ DSAF_PORT_RATE_1000 = 0,
+ DSAF_PORT_RATE_2500,
+ DSAF_PORT_RATE_10000
+};
+
+enum dsaf_stp_port_type {
+ DSAF_STP_PORT_TYPE_DISCARD = 0,
+ DSAF_STP_PORT_TYPE_BLOCK = 1,
+ DSAF_STP_PORT_TYPE_LISTEN = 2,
+ DSAF_STP_PORT_TYPE_LEARN = 3,
+ DSAF_STP_PORT_TYPE_FORWARD = 4
+};
+
+enum dsaf_sw_port_type {
+ DSAF_SW_PORT_TYPE_NON_VLAN = 0,
+ DSAF_SW_PORT_TYPE_ACCESS = 1,
+ DSAF_SW_PORT_TYPE_TRUNK = 2,
+};
+
+#define DSAF_SUB_BASE_SIZE (0x10000)
+
+/* dsaf mode define */
+enum dsaf_mode {
+	DSAF_MODE_INVALID = 0,	/**< Invalid dsaf mode */
+	DSAF_MODE_ENABLE_FIX,	/**< en DSAF-mode, fixed queue mapping */
+	DSAF_MODE_ENABLE_0VM,	/**< en DSAF-mode, support 0 VM */
+	DSAF_MODE_ENABLE_8VM,	/**< en DSAF-mode, support 8 VM */
+	DSAF_MODE_ENABLE_16VM,	/**< en DSAF-mode, support 16 VM */
+	DSAF_MODE_ENABLE_32VM,	/**< en DSAF-mode, support 32 VM */
+	DSAF_MODE_ENABLE_128VM,	/**< en DSAF-mode, support 128 VM */
+	DSAF_MODE_ENABLE,	/**< all modes above enable DSAF */
+	DSAF_MODE_DISABLE_FIX,	/**< non-dsaf, fixed queue mapping */
+	DSAF_MODE_DISABLE_2PORT_8VM,	/**< non-dsaf, 2port 8VM */
+	DSAF_MODE_DISABLE_2PORT_16VM,	/**< non-dsaf, 2port 16VM */
+	DSAF_MODE_DISABLE_2PORT_64VM,	/**< non-dsaf, 2port 64VM */
+	DSAF_MODE_DISABLE_6PORT_0VM,	/**< non-dsaf, 6port 0VM */
+	DSAF_MODE_DISABLE_6PORT_2VM,	/**< non-dsaf, 6port 2VM */
+	DSAF_MODE_DISABLE_6PORT_4VM,	/**< non-dsaf, 6port 4VM */
+	DSAF_MODE_DISABLE_6PORT_16VM,	/**< non-dsaf, 6port 16VM */
+	DSAF_MODE_MAX		/**< the last one, used as the count */
+};
+
+#define DSAF_DEST_PORT_NUM 256 /* DSAF max port num */
+#define DSAF_WORD_BIT_CNT 32 /* the num bit of word */
+
+/*mac entry, mc or uc entry*/
+struct dsaf_drv_mac_single_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u16 in_vlan_id; /* value of VlanId */
+
+	/* valid input port number: fixed to 0 in dsaf mode, */
+	/* in non-dsaf mode the port for which the entry is valid */
+ u8 in_port_num;
+
+ u8 port_num; /*output port num*/
+ u8 rsv[6];
+};
+
+/*only mc entry*/
+struct dsaf_drv_mac_multi_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+ u16 in_vlan_id;
+	/* output port mask for this mac address, */
+	/* bit0-bit5 map to Port0-Port5 (one valid bit per port) */
+ u32 port_mask[DSAF_DEST_PORT_NUM / DSAF_WORD_BIT_CNT];
+
+	/* valid input port number: fixed to 0 in dsaf mode, */
+	/* in non-dsaf mode the port for which the entry is valid */
+ u8 in_port_num;
+ u8 rsv[7];
+};
+
+struct dsaf_hw_stats {
+ u64 pad_drop;
+ u64 man_pkts;
+ u64 rx_pkts;
+ u64 rx_pkt_id;
+ u64 rx_pause_frame;
+ u64 release_buf_num;
+ u64 sbm_drop;
+ u64 crc_false;
+ u64 bp_drop;
+ u64 rslt_drop;
+ u64 local_addr_false;
+ u64 vlan_drop;
+ u64 stp_drop;
+ u64 tx_pkts;
+};
+
+struct hnae_vf_cb {
+ u8 port_index;
+ struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev;
+	struct hnae_handle ae_handle; /* must be the last member */
+};
+
+struct dsaf_int_xge_src {
+ u32 xid_xge_ecc_err_int_src;
+ u32 xid_xge_fsm_timout_int_src;
+ u32 sbm_xge_lnk_fsm_timout_int_src;
+ u32 sbm_xge_lnk_ecc_2bit_int_src;
+ u32 sbm_xge_mib_req_failed_int_src;
+ u32 sbm_xge_mib_req_fsm_timout_int_src;
+ u32 sbm_xge_mib_rels_fsm_timout_int_src;
+ u32 sbm_xge_sram_ecc_2bit_int_src;
+ u32 sbm_xge_mib_buf_sum_err_int_src;
+ u32 sbm_xge_mib_req_extra_int_src;
+ u32 sbm_xge_mib_rels_extra_int_src;
+ u32 voq_xge_start_to_over_0_int_src;
+ u32 voq_xge_start_to_over_1_int_src;
+ u32 voq_xge_ecc_err_int_src;
+};
+
+struct dsaf_int_ppe_src {
+ u32 xid_ppe_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_req_failed_int_src;
+ u32 sbm_ppe_mib_req_fsm_timout_int_src;
+ u32 sbm_ppe_mib_rels_fsm_timout_int_src;
+ u32 sbm_ppe_sram_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_buf_sum_err_int_src;
+ u32 sbm_ppe_mib_req_extra_int_src;
+ u32 sbm_ppe_mib_rels_extra_int_src;
+ u32 voq_ppe_start_to_over_0_int_src;
+ u32 voq_ppe_ecc_err_int_src;
+ u32 xod_ppe_fifo_rd_empty_int_src;
+ u32 xod_ppe_fifo_wr_full_int_src;
+};
+
+struct dsaf_int_rocee_src {
+ u32 xid_rocee_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_req_failed_int_src;
+ u32 sbm_rocee_mib_req_fsm_timout_int_src;
+ u32 sbm_rocee_mib_rels_fsm_timout_int_src;
+ u32 sbm_rocee_sram_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_buf_sum_err_int_src;
+ u32 sbm_rocee_mib_req_extra_int_src;
+ u32 sbm_rocee_mib_rels_extra_int_src;
+ u32 voq_rocee_start_to_over_0_int_src;
+ u32 voq_rocee_ecc_err_int_src;
+};
+
+struct dsaf_int_tbl_src {
+ u32 tbl_da0_mis_src;
+ u32 tbl_da1_mis_src;
+ u32 tbl_da2_mis_src;
+ u32 tbl_da3_mis_src;
+ u32 tbl_da4_mis_src;
+ u32 tbl_da5_mis_src;
+ u32 tbl_da6_mis_src;
+ u32 tbl_da7_mis_src;
+ u32 tbl_sa_mis_src;
+ u32 tbl_old_sech_end_src;
+ u32 lram_ecc_err1_src;
+ u32 lram_ecc_err2_src;
+ u32 tram_ecc_err1_src;
+ u32 tram_ecc_err2_src;
+ u32 tbl_ucast_bcast_xge0_src;
+ u32 tbl_ucast_bcast_xge1_src;
+ u32 tbl_ucast_bcast_xge2_src;
+ u32 tbl_ucast_bcast_xge3_src;
+ u32 tbl_ucast_bcast_xge4_src;
+ u32 tbl_ucast_bcast_xge5_src;
+ u32 tbl_ucast_bcast_ppe_src;
+ u32 tbl_ucast_bcast_rocee_src;
+};
+
+struct dsaf_int_stat {
+ struct dsaf_int_xge_src dsaf_int_xge_stat[DSAF_COMM_CHN];
+ struct dsaf_int_ppe_src dsaf_int_ppe_stat[DSAF_COMM_CHN];
+ struct dsaf_int_rocee_src dsaf_int_rocee_stat[DSAF_COMM_CHN];
+ struct dsaf_int_tbl_src dsaf_int_tbl_stat[1];
+};
+
+/* dsaf device struct, the macs attach to it (mac -> dsaf) */
+struct dsaf_device {
+ struct device *dev;
+ struct hnae_ae_dev ae_dev;
+
+ void *priv;
+
+ int virq[DSAF_IRQ_NUM];
+
+ u8 __iomem *sc_base;
+ u8 __iomem *sds_base;
+ u8 __iomem *ppe_base;
+ u8 __iomem *io_base;
+ u8 __iomem *cpld_base;
+
+ u32 desc_num; /* desc num per queue*/
+ u32 buf_size; /* ring buffer size */
+ int buf_size_type; /* ring buffer size-type */
+ enum dsaf_mode dsaf_mode; /* dsaf mode */
+ enum hal_dsaf_mode dsaf_en;
+ enum hal_dsaf_tc_mode dsaf_tc_mode;
+ u32 dsaf_ver;
+
+ struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
+ struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
+ struct hns_mac_cb *mac_cb;
+
+ struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
+ struct dsaf_int_stat int_stat;
+};
+
+static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
+{
+ return (void *)((u8 *)dsaf_dev + sizeof(*dsaf_dev));
+}
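+
+/* hns_dsaf_alloc_dev() is called from hns_dsaf_probe() with
+ * sizeof(struct dsaf_drv_priv), so the driver private area sits
+ * directly behind struct dsaf_device and the helper above simply
+ * returns the first byte past it
+ */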
+
+struct dsaf_drv_tbl_tcam_key {
+ union {
+ struct {
+ u8 mac_3;
+ u8 mac_2;
+ u8 mac_1;
+ u8 mac_0;
+ } bits;
+
+ u32 val;
+ } high;
+ union {
+ struct {
+ u32 port:4; /* port id, */
+ /* dsaf-mode fixed 0, non-dsaf-mode port id*/
+ u32 vlan:12; /* vlan id */
+ u32 mac_5:8;
+ u32 mac_4:8;
+ } bits;
+
+ u32 val;
+ } low;
+};
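+
+/* packing example (illustrative only; the mac_N numbering is assumed
+ * to follow the address byte order): for MAC 00:11:22:33:44:55 on
+ * vlan 10, high.val carries mac_0..mac_3 (0x00, 0x11, 0x22, 0x33)
+ * while low.val carries mac_4/mac_5 (0x44, 0x55), the 12-bit vlan id
+ * (10) and the 4-bit port field
+ */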
+
+struct dsaf_drv_soft_mac_tbl {
+ struct dsaf_drv_tbl_tcam_key tcam_key;
+ u16 index; /*the entry's index in tcam tab*/
+};
+
+struct dsaf_drv_priv {
+ /* soft tab Mac key, for hardware tab*/
+ struct dsaf_drv_soft_mac_tbl *soft_mac_tbl;
+};
+
+static inline void hns_dsaf_tbl_tcam_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_tcam_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_TCAM_ADDR_0_REG,
+ DSAF_TBL_TCAM_ADDR_M, DSAF_TBL_TCAM_ADDR_S,
+ tab_tcam_addr);
+}
+
+static inline void hns_dsaf_tbl_tcam_load_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
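+
+/* the load bit is written as 1 and then 0: the table block presumably
+ * latches the pending tcam entry on this software-generated pulse
+ */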
+
+static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_line_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_LINE_ADDR_0_REG,
+ DSAF_TBL_LINE_ADDR_M, DSAF_TBL_LINE_ADDR_S,
+ tab_line_addr);
+}
+
+static inline int hns_dsaf_get_comm_idx_by_port(int port)
+{
+ if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
+ return 0;
+ else
+ return (port - DSAF_COMM_CHN + 1);
+}
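+
+/* e.g., assuming DSAF_COMM_CHN is 6: service ports 0..5 all map to
+ * common index 0, while each debug port p >= 6 gets its own index
+ * p - 5 (port 6 -> 1, port 7 -> 2, ...)
+ */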
+
+static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
+ struct hnae_handle *handle)
+{
+ return container_of(handle, struct hnae_vf_cb, ae_handle);
+}
+
+int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr);
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_get_mac_entry_by_index(
+ struct dsaf_device *dsaf_dev,
+ u16 entry_index,
+ struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
+
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+ u32 port, u32 val);
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
+
+int hns_dsaf_get_sset_count(int stringset);
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port);
+
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
+int hns_dsaf_get_regs_count(void);
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+
+#endif /* __HNS_DSAF_MAIN_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
new file mode 100644
index 000000000000..523e9b83d304
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_ppe.h"
+
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ int speed_reg = 0;
+ u8 value;
+
+ if (!mac_cb) {
+		pr_err("sfp_led_opt mac_cb is null!\n");
+ return;
+ }
+ if (!mac_cb->cpld_vaddr) {
+		dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null!\n",
+ mac_cb->mac_id);
+ return;
+ }
+
+ if (speed == MAC_SPEED_10000)
+ speed_reg = 1;
+
+ value = mac_cb->cpld_led_value;
+
+ if (link_status) {
+ dsaf_set_bit(value, DSAF_LED_LINK_B, link_status);
+ dsaf_set_field(value, DSAF_LED_SPEED_M,
+ DSAF_LED_SPEED_S, speed_reg);
+ dsaf_set_bit(value, DSAF_LED_DATA_B, data);
+
+ if (value != mac_cb->cpld_led_value) {
+ dsaf_write_b(mac_cb->cpld_vaddr, value);
+ mac_cb->cpld_led_value = value;
+ }
+ } else {
+ dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+ }
+}
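+
+/* the led byte written above follows the bit layout from
+ * hns_dsaf_misc.h: speed in bits 0-1, link in bit 2, data (activity)
+ * in bit 4 and the locate ("anchor") bit in bit 5
+ */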
+
+void cpld_led_reset(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb || !mac_cb->cpld_vaddr)
+ return;
+
+ dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+}
+
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+ dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+ CPLD_LED_ON_VALUE);
+ dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ return 2;
+ case HNAE_LED_INACTIVE:
+ dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+ CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define RESET_REQ_OR_DREQ 1
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
+{
+ u32 xbar_reg_addr;
+ u32 nt_reg_addr;
+
+ if (!val) {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
+ } else {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
+ }
+
+ dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
+ RESET_REQ_OR_DREQ);
+ dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
+ RESET_REQ_OR_DREQ);
+}
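+
+/* convention shared by the reset helpers below: val == 0 asserts the
+ * reset through a *_RESET_REQ register, any other value releases it
+ * through the matching *_RESET_DREQ register
+ */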
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ if (port >= DSAF_XGE_NUM)
+ return;
+
+ reg_val |= RESET_REQ_OR_DREQ;
+ reg_val |= 0x2082082 << port;
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
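+
+/* 0x2082082 sets one bit in every 6-bit group (bits 1, 7, 13, 19 and
+ * 25); shifting it by the port index appears to select that port's
+ * reset bit in each xge sub-block sharing the request register
+ */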
+
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+ u32 port, u32 val)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ if (port >= DSAF_XGE_NUM)
+ return;
+
+ reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+ u32 reg_val_1;
+ u32 reg_val_2;
+
+ if (port >= DSAF_GE_NUM)
+ return;
+
+ if (port < DSAF_SERVICE_NW_NUM) {
+ reg_val_1 = 0x1 << port;
+ reg_val_2 = 0x1041041 << port;
+
+ if (val == 0) {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_REQ0_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+ reg_val_2);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+ }
+ } else {
+ reg_val_1 = 0x15540 << (port - 6);
+ reg_val_2 = 0x100 << (port - 6);
+
+ if (val == 0) {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_PPE_RESET_REQ_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+
+ dsaf_write_reg(dsaf_dev->sc_base,
+ DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+ reg_val_2);
+ }
+ }
+}
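+
+/* likewise, 0x1041041 is one bit per 6-bit group (bits 0, 6, 12, 18
+ * and 24) shifted by port for the service ports; the debug ports
+ * (port >= 6) instead use dedicated fields in the GE and PPE reset
+ * registers
+ */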
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ reg_val |= RESET_REQ_OR_DREQ << port;
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
+{
+ int comm_index = ppe_common->comm_index;
+ struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+ u32 reg_val;
+ u32 reg_addr;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ reg_val = RESET_REQ_OR_DREQ;
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
+
+ } else {
+ reg_val = 0x100 << (comm_index - 1);
+
+ if (val == 0)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+ }
+
+ dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+/**
+ * hns_mac_get_phy_if - get phy interface from serdes mode
+ * @mac_cb: mac control block
+ * return phy interface
+ */
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+{
+ u32 hilink3_mode;
+ u32 hilink4_mode;
+ void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
+ int dev_id = mac_cb->mac_id;
+ phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+
+ hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
+ hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
+ if (dev_id >= 0 && dev_id <= 3) {
+ if (hilink4_mode == 0)
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ } else if (dev_id >= 4 && dev_id <= 5) {
+ if (hilink3_mode == 0)
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ } else {
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ }
+
+ dev_dbg(mac_cb->dev,
+ "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
+ hilink3_mode, hilink4_mode, dev_id, phy_if);
+ return phy_if;
+}
+
+/**
+ * hns_mac_config_sds_loopback - set loopback for serdes
+ * @mac_cb: mac control block
+ * @en: enable or disable
+ * return 0 - success
+ */
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
+{
+ /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
+ * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
+ */
+ u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+ const u8 lane_id[] = {
+ 0, /* mac 0 -> lane 0 */
+ 1, /* mac 1 -> lane 1 */
+ 2, /* mac 2 -> lane 2 */
+ 3, /* mac 3 -> lane 3 */
+ 2, /* mac 4 -> lane 2 */
+ 3, /* mac 5 -> lane 3 */
+ 0, /* mac 6 -> lane 0 */
+ 1 /* mac 7 -> lane 1 */
+ };
+#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
+ u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
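+	/* RX_CSR above builds a per-lane rx csr byte offset; the final
+	 * "* 2" is assumed to turn a 16-bit register index into a byte
+	 * address (inferred from the even addressing, not documented)
+	 */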
+
+ int sfp_prsnt;
+ int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+
+	if (!mac_cb->phy_node) {
+		if (ret)
+			pr_info("please confirm whether the sfp is present\n");
+		else if (!sfp_prsnt)
+			pr_info("no sfp in this eth\n");
+	}
+
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
new file mode 100644
index 000000000000..419f07aa9734
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MISC_H
+#define _HNS_DSAF_MISC_H
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_mac.h"
+
+#define CPLD_ADDR_PORT_OFFSET 0x4
+
+#define HS_LED_ON 0xE
+#define HS_LED_OFF 0xF
+
+#define CPLD_LED_ON_VALUE 1
+#define CPLD_LED_DEFAULT_VALUE 0
+
+#define MAC_SFP_PORT_OFFSET 0x2
+
+#define DSAF_LED_SPEED_S 0
+#define DSAF_LED_SPEED_M (0x3 << DSAF_LED_SPEED_S)
+
+#define DSAF_LED_LINK_B 2
+#define DSAF_LED_DATA_B 4
+#define DSAF_LED_ANCHOR_B 5
+
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data);
+void cpld_led_reset(struct hns_mac_cb *mac_cb);
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
new file mode 100644
index 000000000000..67f33f185a44
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "hns_dsaf_ppe.h"
+
+static void __iomem *hns_ppe_common_get_ioaddr(
+ struct ppe_common_cb *ppe_common)
+{
+ void __iomem *base_addr;
+
+ int idx = ppe_common->comm_index;
+
+ if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ base_addr = ppe_common->dsaf_dev->ppe_base
+ + PPE_COMMON_REG_OFFSET;
+ else
+ base_addr = ppe_common->dsaf_dev->sds_base
+ + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+ + PPE_COMMON_REG_OFFSET;
+
+ return base_addr;
+}
+
+/**
+ * hns_ppe_common_get_cfg - get ppe common config
+ * @dsaf_dev: dsaf device
+ * @comm_index: common index
+ * return 0 - success, negative - fail
+ */
+int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ struct ppe_common_cb *ppe_common;
+ int ppe_num;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
+ else
+ ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
+
+ ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) +
+ ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL);
+ if (!ppe_common)
+ return -ENOMEM;
+
+ ppe_common->ppe_num = ppe_num;
+ ppe_common->dsaf_dev = dsaf_dev;
+ ppe_common->comm_index = comm_index;
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
+ else
+ ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
+ ppe_common->dev = dsaf_dev->dev;
+
+ ppe_common->io_base = hns_ppe_common_get_ioaddr(ppe_common);
+
+ dsaf_dev->ppe_common[comm_index] = ppe_common;
+
+ return 0;
+}
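+
+/* the single devm_kzalloc() above sizes the trailing ppe_cb[] array
+ * (declared as a zero-length array in hns_dsaf_ppe.h) for ppe_num
+ * engines, so ppe_common and its per-engine blocks share one
+ * allocation
+ */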
+
+void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+{
+ dsaf_dev->ppe_common[comm_index] = NULL;
+}
+
+static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
+{
+ void __iomem *base_addr;
+ int common_idx = ppe_common->comm_index;
+
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+ base_addr = ppe_common->dsaf_dev->ppe_base +
+ ppe_idx * PPE_REG_OFFSET;
+
+ } else {
+ base_addr = ppe_common->dsaf_dev->sds_base +
+ (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
+ }
+
+ return base_addr;
+}
+
+static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
+{
+ int port;
+
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
+ port = idx;
+ else
+ port = HNS_PPE_SERVICE_NW_ENGINE_NUM
+ + ppe_common->comm_index - 1;
+
+ return port;
+}
+
+static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+ struct hns_ppe_cb *ppe_cb;
+ u32 ppe_num = ppe_common->ppe_num;
+
+ for (i = 0; i < ppe_num; i++) {
+ ppe_cb = &ppe_common->ppe_cb[i];
+ ppe_cb->dev = ppe_common->dev;
+ ppe_cb->next = NULL;
+ ppe_cb->ppe_common_cb = ppe_common;
+ ppe_cb->index = i;
+ ppe_cb->port = hns_ppe_get_port(ppe_common, i);
+ ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
+ ppe_cb->virq = 0;
+ }
+}
+
+static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb)
+{
+ dsaf_set_dev_bit(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG,
+ PPE_CNT_CLR_CE_B, 1);
+}
+
+/**
+ * hns_ppe_checksum_hw - set ppe checksum calculation
+ * @ppe_cb: ppe control block
+ * @value: value
+ */
+static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+ dsaf_set_dev_field(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG,
+ 0xfffffff, 0, value);
+}
+
+static void hns_ppe_set_qid_mode(struct ppe_common_cb *ppe_common,
+				 enum ppe_qid_mode qid_mode)
+{
+ dsaf_set_dev_field(ppe_common, PPE_COM_CFG_QID_MODE_REG,
+ PPE_CFG_QID_MODE_CF_QID_MODE_M,
+			   PPE_CFG_QID_MODE_CF_QID_MODE_S, qid_mode);
+}
+
+/**
+ * hns_ppe_set_qid - set ppe qid
+ * @ppe_common: ppe common device
+ * @qid: queue id
+ */
+static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
+{
+ u32 qid_mod = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+
+ if (!dsaf_get_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S)) {
+ dsaf_set_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S, qid);
+ dsaf_write_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG, qid_mod);
+ }
+}
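+
+/* the default qid is only programmed while the field still reads as
+ * zero, so a value configured earlier (by firmware, for instance) is
+ * left untouched
+ */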
+
+/**
+ * hns_ppe_set_port_mode - set port mode
+ * @ppe_cb: ppe control block
+ * @mode: port mode
+ */
+static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
+ enum ppe_port_mode mode)
+{
+ dsaf_write_dev(ppe_cb, PPE_CFG_XGE_MODE_REG, mode);
+}
+
+/**
+ * hns_ppe_common_init_hw - init ppe common device
+ * @ppe_common: ppe common device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
+{
+ enum ppe_qid_mode qid_mode;
+ enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode;
+
+ hns_ppe_com_srst(ppe_common, 0);
+ mdelay(100);
+ hns_ppe_com_srst(ppe_common, 1);
+ mdelay(100);
+
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+ switch (dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_FIX:
+ qid_mode = PPE_QID_MODE0;
+ hns_ppe_set_qid(ppe_common, 0);
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ qid_mode = PPE_QID_MODE3;
+ break;
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ qid_mode = PPE_QID_MODE4;
+ break;
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ qid_mode = PPE_QID_MODE5;
+ break;
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ qid_mode = PPE_QID_MODE2;
+ break;
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ qid_mode = PPE_QID_MODE1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ qid_mode = PPE_QID_MODE7;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ qid_mode = PPE_QID_MODE6;
+ break;
+ default:
+ dev_err(ppe_common->dev,
+ "get ppe queue mode failed! dsaf_mode=%d\n",
+ dsaf_mode);
+ return -EINVAL;
+ }
+ hns_ppe_set_qid_mode(ppe_common, qid_mode);
+ }
+
+ dsaf_set_dev_bit(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG,
+ PPE_COMMON_CNT_CLR_CE_B, 1);
+
+ return 0;
+}
+
+/* clear and mask ppe exception irqs */
+static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
+{
+	u32 clr_value = 0xfffffffful;
+	u32 msk_value = en ? 0xfffffffful : 0; /* 1 is en, 0 is dis */
+	u32 vld_msk = 0;
+
+	/* only bits 0, 1 and 7 are valid */
+	dsaf_set_bit(vld_msk, 0, 1);
+	dsaf_set_bit(vld_msk, 1, 1);
+	dsaf_set_bit(vld_msk, 7, 1);
+
+	/* clear the status bits */
+	dsaf_write_dev(ppe_cb, PPE_RINT_REG, clr_value);
+
+	/* mask with vld_msk so the reserved bits stay 0 */
+	dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_value & vld_msk);
+}
+
+/**
+ * hns_ppe_init_hw - init ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
+{
+ struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
+ u32 port = ppe_cb->port;
+ struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
+
+ hns_ppe_srst_by_port(dsaf_dev, port, 0);
+ mdelay(10);
+ hns_ppe_srst_by_port(dsaf_dev, port, 1);
+
+ /* clr and msk except irq*/
+ hns_ppe_exc_irq_en(ppe_cb, 0);
+
+ if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
+ else
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+ hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
+ hns_ppe_cnt_clr_ce(ppe_cb);
+}
+
+/**
+ * hns_ppe_uninit_hw - uninit ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
+{
+ u32 port;
+
+ if (ppe_cb->ppe_common_cb) {
+ port = ppe_cb->index;
+ hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0);
+ }
+}
+
+void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+ memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
+ }
+}
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ if (dsaf_dev->ppe_common[i])
+ hns_ppe_uninit_ex(dsaf_dev->ppe_common[i]);
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+ }
+}
+
+/**
+ * hns_ppe_reset_common - reinit ppe/rcb hw
+ * @dsaf_dev: dsaf device
+ * @ppe_common_index: ppe common index
+ */
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
+{
+ u32 i;
+ int ret;
+ struct ppe_common_cb *ppe_common;
+
+ ppe_common = dsaf_dev->ppe_common[ppe_common_index];
+ ret = hns_ppe_common_init_hw(ppe_common);
+ if (ret)
+ return;
+
+ ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
+ if (ret)
+ return;
+
+ for (i = 0; i < ppe_common->ppe_num; i++)
+ hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+
+ hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]);
+}
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
+{
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ hw_stats->rx_pkts_from_sw
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ hw_stats->rx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ hw_stats->rx_drop_no_bd
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ hw_stats->rx_alloc_buf_fail
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ hw_stats->rx_alloc_buf_wait
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ hw_stats->rx_drop_no_buf
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ hw_stats->rx_err_fifo_full
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ hw_stats->tx_bd_form_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ hw_stats->tx_pkts_from_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ hw_stats->tx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ hw_stats->tx_err_fifo_empty
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ hw_stats->tx_err_checksum
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+}
+
+int hns_ppe_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ETH_PPE_STATIC_NUM;
+ return 0;
+}
+
+int hns_ppe_get_regs_count(void)
+{
+ return ETH_PPE_DUMP_NUM;
+}
+
+/**
+ * hns_ppe_get_strings - get ppe statistics strings
+ * @ppe_cb: ppe control block
+ * @stringset: string set type
+ * @data: output string
+ */
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ int index = ppe_cb->index;
+
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index);
+}
+
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ regs_buff[0] = hw_stats->rx_pkts_from_sw;
+ regs_buff[1] = hw_stats->rx_pkts;
+ regs_buff[2] = hw_stats->rx_drop_no_bd;
+ regs_buff[3] = hw_stats->rx_alloc_buf_fail;
+ regs_buff[4] = hw_stats->rx_alloc_buf_wait;
+ regs_buff[5] = hw_stats->rx_drop_no_buf;
+ regs_buff[6] = hw_stats->rx_err_fifo_full;
+
+ regs_buff[7] = hw_stats->tx_bd_form_rcb;
+ regs_buff[8] = hw_stats->tx_pkts_from_rcb;
+ regs_buff[9] = hw_stats->tx_pkts;
+ regs_buff[10] = hw_stats->tx_err_fifo_empty;
+ regs_buff[11] = hw_stats->tx_err_checksum;
+}
+
+/**
+ * hns_ppe_init - init ppe device
+ * @dsaf_dev: dsaf device
+ * return 0 - success, negative - fail
+ */
+int hns_ppe_init(struct dsaf_device *dsaf_dev)
+{
+ int i, k;
+ int ret;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ ret = hns_ppe_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_ppe_cfg_fail;
+
+ ret = hns_rcb_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_rcb_cfg_fail;
+
+ hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
+
+ hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ }
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++)
+ hns_ppe_reset_common(dsaf_dev, i);
+
+ return 0;
+
+get_rcb_cfg_fail:
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+get_ppe_cfg_fail:
+ for (k = i - 1; k >= 0; k--) {
+ hns_rcb_common_free_cfg(dsaf_dev, k);
+ hns_ppe_common_free_cfg(dsaf_dev, k);
+ }
+ return ret;
+}
+
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data)
+{
+ struct ppe_common_cb *ppe_common = ppe_cb->ppe_common_cb;
+ u32 *regs = data;
+ u32 i;
+ u32 offset;
+
+ /* ppe common registers */
+ regs[0] = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+ regs[1] = dsaf_read_dev(ppe_common, PPE_COM_INTEN_REG);
+ regs[2] = dsaf_read_dev(ppe_common, PPE_COM_RINT_REG);
+ regs[3] = dsaf_read_dev(ppe_common, PPE_COM_INTSTS_REG);
+ regs[4] = dsaf_read_dev(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG);
+
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++) {
+ offset = PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 0x4 * i;
+ regs[5 + i] = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 2]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 3]
+ = dsaf_read_dev(ppe_common, offset);
+ }
+
+ /* mark end of ppe regs */
+ for (i = 521; i < 524; i++)
+ regs[i] = 0xeeeeeeee;
+
+ /* ppe channel registers */
+ regs[525] = dsaf_read_dev(ppe_cb, PPE_CFG_TX_FIFO_THRSLD_REG);
+ regs[526] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_THRSLD_REG);
+ regs[527] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG);
+ regs[528] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG);
+ regs[529] = dsaf_read_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG);
+ regs[530] = dsaf_read_dev(ppe_cb, PPE_CFG_BUS_CTRL_REG);
+ regs[531] = dsaf_read_dev(ppe_cb, PPE_CFG_TNL_TO_BE_RST_REG);
+ regs[532] = dsaf_read_dev(ppe_cb, PPE_CURR_TNL_CAN_RST_REG);
+
+ regs[533] = dsaf_read_dev(ppe_cb, PPE_CFG_XGE_MODE_REG);
+ regs[534] = dsaf_read_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG);
+ regs[535] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_MODE_REG);
+ regs[536] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_VLAN_TAG_REG);
+ regs[537] = dsaf_read_dev(ppe_cb, PPE_CFG_TAG_GEN_REG);
+ regs[538] = dsaf_read_dev(ppe_cb, PPE_CFG_PARSE_TAG_REG);
+ regs[539] = dsaf_read_dev(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG);
+
+ regs[540] = dsaf_read_dev(ppe_cb, PPE_INTEN_REG);
+ regs[541] = dsaf_read_dev(ppe_cb, PPE_RINT_REG);
+ regs[542] = dsaf_read_dev(ppe_cb, PPE_INTSTS_REG);
+ regs[543] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_INT_REG);
+
+ regs[544] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME0_REG);
+ regs[545] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME1_REG);
+
+ /* ppe static */
+ regs[546] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ regs[547] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ regs[548] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ regs[549] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ regs[550] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ regs[551] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ regs[552] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ regs[553] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+ regs[554] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ regs[555] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ regs[556] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ regs[557] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ regs[558] = dsaf_read_dev(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG);
+ regs[559] = dsaf_read_dev(ppe_cb, PPE_CFG_AXI_DBG_REG);
+ regs[560] = dsaf_read_dev(ppe_cb, PPE_HIS_PRO_ERR_REG);
+ regs[561] = dsaf_read_dev(ppe_cb, PPE_HIS_TNL_FIFO_ERR_REG);
+ regs[562] = dsaf_read_dev(ppe_cb, PPE_CURR_CFF_DATA_NUM_REG);
+ regs[563] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_ST_REG);
+ regs[564] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_ST_REG);
+ regs[565] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO0_REG);
+ regs[566] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO1_REG);
+ regs[567] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG);
+ regs[568] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO1_REG);
+ regs[569] = dsaf_read_dev(ppe_cb, PPE_ECO0_REG);
+ regs[570] = dsaf_read_dev(ppe_cb, PPE_ECO1_REG);
+ regs[571] = dsaf_read_dev(ppe_cb, PPE_ECO2_REG);
+
+ /* mark end of ppe regs */
+ for (i = 572; i < 576; i++)
+ regs[i] = 0xeeeeeeee;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
new file mode 100644
index 000000000000..4894f9a0d39f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_PPE_H
+#define _HNS_DSAF_PPE_H
+
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
+
+#define HNS_PPE_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_PPE_DEBUG_NW_ENGINE_NUM 1
+#define HNS_PPE_COM_NUM DSAF_COMM_DEV_NUM
+
+#define PPE_COMMON_REG_OFFSET 0x70000
+#define PPE_REG_OFFSET 0x10000
+
+#define ETH_PPE_DUMP_NUM 576
+#define ETH_PPE_STATIC_NUM 12
+enum ppe_qid_mode {
+ PPE_QID_MODE0 = 0, /* fixed queue id mode */
+ PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */
+ PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */
+ PPE_QID_MODE3, /* switch:4TC/8TAG non switch:2Port/64VM */
+ PPE_QID_MODE4, /* switch:8VM/16TAG non switch:2Port/16VM/4TC */
+ PPE_QID_MODE5, /* non switch:6Port/16TAG */
+ PPE_QID_MODE6, /* non switch:6Port/2VM/8TC */
+ PPE_QID_MODE7, /* non switch:2Port/8VM/8TC */
+};
+
+enum ppe_port_mode {
+ PPE_MODE_GE = 0,
+ PPE_MODE_XGE,
+};
+
+enum ppe_common_mode {
+ PPE_COMMON_MODE_DEBUG = 0,
+ PPE_COMMON_MODE_SERVICE,
+ PPE_COMMON_MODE_MAX
+};
+
+struct hns_ppe_hw_stats {
+ u64 rx_pkts_from_sw;
+ u64 rx_pkts;
+ u64 rx_drop_no_bd;
+ u64 rx_alloc_buf_fail;
+ u64 rx_alloc_buf_wait;
+ u64 rx_drop_no_buf;
+ u64 rx_err_fifo_full;
+ u64 tx_bd_form_rcb;
+ u64 tx_pkts_from_rcb;
+ u64 tx_pkts;
+ u64 tx_err_fifo_empty;
+ u64 tx_err_checksum;
+};
+
+struct hns_ppe_cb {
+ struct device *dev;
+ struct hns_ppe_cb *next; /* pointer to next ppe device */
+ struct ppe_common_cb *ppe_common_cb; /* belong to */
+ struct hns_ppe_hw_stats hw_stats;
+
+ u8 index; /* index in a ppe common device */
+ u8 port; /* port id in dsaf */
+ void __iomem *io_base;
+ int virq;
+};
+
+struct ppe_common_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ void __iomem *io_base;
+
+ enum ppe_common_mode ppe_mode;
+
+ u8 comm_index; /*ppe_common index*/
+
+ u32 ppe_num;
+ struct hns_ppe_cb ppe_cb[0];
+};
+
+int hns_ppe_init(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index);
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb);
+
+int hns_ppe_get_sset_count(int stringset);
+int hns_ppe_get_regs_count(void);
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
+
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
+#endif /* _HNS_DSAF_PPE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
new file mode 100644
index 000000000000..4db32c62f062
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/cacheflush.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define RCB_COMMON_REG_OFFSET 0x80000
+#define TX_RING 0
+#define RX_RING 1
+
+#define RCB_RESET_WAIT_TIMES 30
+#define RCB_RESET_TRY_TIMES 10
+
+/**
+ *hns_rcb_wait_fbd_clean - wait until the fbds are cleaned
+ *@qs: ring struct pointer array
+ *@q_num: number of queues in the array
+ *@flag: tx or rx flag
+ */
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
+{
+ int i, wait_cnt;
+ u32 fbd_num;
+
+ for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
+ usleep_range(200, 300);
+ fbd_num = 0;
+ if (flag & RCB_INT_FLAG_TX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_TX_RING_FBDNUM_REG);
+ if (flag & RCB_INT_FLAG_RX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_RX_RING_FBDNUM_REG);
+ if (!fbd_num)
+ i++;
+ if (wait_cnt >= 10000)
+ break;
+ }
+
+ if (i < q_num)
+ dev_err(qs[i]->handle->owner_dev,
+ "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
+}
+
+/**
+ *hns_rcb_reset_ring_hw - ring reset
+ *@q: ring struct pointer
+ */
+void hns_rcb_reset_ring_hw(struct hnae_queue *q)
+{
+ u32 wait_cnt;
+ u32 try_cnt = 0;
+ u32 could_ret;
+
+ u32 tx_fbd_num;
+
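+ /* wait for the tx fbds to drain, then disable prefetch and toggle
+  * the ring reset request until the hardware reports the ring could
+  * be reset; retry the whole sequence up to RCB_RESET_TRY_TIMES
+  */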
+ while (try_cnt++ < RCB_RESET_TRY_TIMES) {
+ usleep_range(100, 200);
+ tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
+ if (tx_fbd_num)
+ continue;
+
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt = 0;
+ while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt++;
+ }
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ if (could_ret)
+ break;
+ }
+
+ if (try_cnt >= RCB_RESET_TRY_TIMES)
+ dev_err(q->dev->dev, "port%d reset ring failed\n",
+ hns_ae_get_vf_cb(q->handle)->port_index);
+}
+
+/**
+ *hns_rcb_int_ctrl_hw - rcb irq enable control
+ *@q: hnae queue struct pointer
+ *@flag: ring flag, tx or rx
+ *@mask: 0 or 1, written to the interrupt mask registers
+ */
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+ u32 int_mask_en = !!mask;
+
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
+ int_mask_en);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
+ int_mask_en);
+ }
+}
+
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+ u32 clr = 1;
+
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr);
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr);
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr);
+ }
+}
+
+/**
+ *hns_rcb_ring_enable_hw - enable or disable a ring
+ *@q: hnae queue struct pointer
+ *@val: non-zero to enable, zero to disable
+ */
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
+{
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
+}
+
+void hns_rcb_start(struct hnae_queue *q, u32 val)
+{
+ hns_rcb_ring_enable_hw(q, val);
+}
+
+/**
+ *hns_rcb_common_init_commit_hw - commit the rcb common init to hardware
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
+{
+ wmb(); /* make sure all prior config writes have completed */
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
+ wmb(); /* make sure the commit write is posted */
+}
+
+/**
+ *hns_rcb_ring_init - init rcb ring
+ *@ring_pair: ring pair control block
+ *@ring_type: ring type, RX_RING or TX_RING
+ */
+static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
+{
+ struct hnae_queue *q = &ring_pair->q;
+ struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
+ u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
+ struct hnae_ring *ring =
+ (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
+ dma_addr_t dma = ring->desc_dma_addr;
+
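+ /* the high base address register takes bits 63:32 of the dma
+  * address; the shift is split as (dma >> 31) >> 1 so it stays
+  * well defined when dma_addr_t is only 32 bits wide
+  */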
+ if (ring_type == RX_RING) {
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+ bd_size_type);
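+ /* BD_NUM and PKTLINE are keyed here by the dsa port id; the
+  * per-port descriptor count itself is programmed through
+  * RCB_CFG_BD_NUM_REG in hns_rcb_common_init_hw()
+  */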
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_dsa);
+ dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_dsa);
+ } else {
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+ bd_size_type);
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_dsa);
+ dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_dsa);
+ }
+}
+
+/**
+ *hns_rcb_init_hw - init rcb hardware
+ *@ring: rcb ring
+ */
+void hns_rcb_init_hw(struct ring_pair_cb *ring)
+{
+ hns_rcb_ring_init(ring, RX_RING);
+ hns_rcb_ring_init(ring, TX_RING);
+}
+
+/**
+ *hns_rcb_set_port_desc_cnt - set rcb port descriptor number
+ *@rcb_common: rcb_common device
+ *@port_idx: port index
+ *@desc_cnt: descriptor (BD) number
+ */
+static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
+ u32 port_idx, u32 desc_cnt)
+{
+ if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+ port_idx = 0;
+
+ dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
+ desc_cnt);
+}
+
+/**
+ *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx: port index
+ *@coalesced_frames: frame count threshold for interrupt coalescing
+ */
+static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+ u32 port_idx,
+ u32 coalesced_frames)
+{
+ if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+ port_idx = 0;
+ if (coalesced_frames >= rcb_common->desc_num ||
+ coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
+ return -EINVAL;
+
+ dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+ coalesced_frames);
+ return 0;
+}
+
+/**
+ *hns_rcb_get_port_coalesced_frames - get rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx: port index
+ * return coalesced frames value
+ */
+static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+ u32 port_idx)
+{
+ if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+ port_idx = 0;
+
+ return dsaf_read_dev(rcb_common,
+ RCB_CFG_PKTLINE_REG + port_idx * 4);
+}
+
+/**
+ *hns_rcb_set_timeout - set the rcb coalesce timeout
+ *@rcb_common: rcb_common device
+ *@timeout: interrupt coalesce timeout value
+ */
+static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
+ u32 timeout)
+{
+ dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+}
+
+static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
+{
+ if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ return HNS_RCB_SERVICE_NW_ENGINE_NUM;
+ else
+ return HNS_RCB_DEBUG_NW_ENGINE_NUM;
+}
+
+/* clear and mask/unmask the rcb common exception irqs */
+static void hns_rcb_comm_exc_irq_en(
+ struct rcb_common_cb *rcb_common, int en)
+{
+ u32 clr_value = 0xfffffffful;
+ u32 msk_value = en ? 0 : 0xfffffffful;
+
+ /* clear the interrupt sources */
+ dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_value);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_value);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_value);
+
+ dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_value);
+ dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_value);
+
+ /* enable or mask the interrupts */
+ dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_value);
+
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_value);
+
+ /* tx bds do not need the cacheline interrupt, so always mask
+  * sf_txring_fbd_intmask (bit 1)
+  */
+ dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_value | 2);
+
+ dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_value);
+ dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_value);
+}
+
+/**
+ *hns_rcb_common_init_hw - init rcb common hardware
+ *@rcb_common: rcb_common device
+ *return 0 on success, negative on failure
+ */
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
+{
+ u32 reg_val;
+ int i;
+ int port_num = hns_rcb_common_get_port_num(rcb_common);
+
+ hns_rcb_comm_exc_irq_en(rcb_common, 0);
+
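+ /* bit 0 of the init flag register is presumably set by hardware
+  * once its internal initialization has finished; bail out if the
+  * block is not ready yet
+  */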
+ reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
+ if (!(reg_val & 0x1)) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < port_num; i++) {
+ hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
+ (void)hns_rcb_set_port_coalesced_frames(
+ rcb_common, i, rcb_common->coalesced_frames);
+ }
+ hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
+
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
+ HNS_RCB_COMMON_ENDIAN);
+
+ return 0;
+}
+
+int hns_rcb_buf_size2type(u32 buf_size)
+{
+ int bd_size_type;
+
+ switch (buf_size) {
+ case 512:
+ bd_size_type = HNS_BD_SIZE_512_TYPE;
+ break;
+ case 1024:
+ bd_size_type = HNS_BD_SIZE_1024_TYPE;
+ break;
+ case 2048:
+ bd_size_type = HNS_BD_SIZE_2048_TYPE;
+ break;
+ case 4096:
+ bd_size_type = HNS_BD_SIZE_4096_TYPE;
+ break;
+ default:
+ bd_size_type = -EINVAL;
+ }
+
+ return bd_size_type;
+}
+
+static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
+{
+ struct hnae_ring *ring;
+ struct rcb_common_cb *rcb_common;
+ struct ring_pair_cb *ring_pair_cb;
+ u32 buf_size;
+ u16 desc_num;
+ int irq_idx;
+
+ ring_pair_cb = container_of(q, struct ring_pair_cb, q);
+ if (ring_type == RX_RING) {
+ ring = &q->rx_ring;
+ ring->io_base = ring_pair_cb->q.io_base;
+ irq_idx = HNS_RCB_IRQ_IDX_RX;
+ } else {
+ ring = &q->tx_ring;
+ ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+ HNS_RCB_TX_REG_OFFSET;
+ irq_idx = HNS_RCB_IRQ_IDX_TX;
+ }
+
+ rcb_common = ring_pair_cb->rcb_common;
+ buf_size = rcb_common->dsaf_dev->buf_size;
+ desc_num = rcb_common->dsaf_dev->desc_num;
+
+ ring->desc = NULL;
+ ring->desc_cb = NULL;
+
+ ring->irq = ring_pair_cb->virq[irq_idx];
+ ring->desc_dma_addr = 0;
+
+ ring->buf_size = buf_size;
+ ring->desc_num = desc_num;
+ ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT;
+ ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
+ ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
+ ring->next_to_use = 0;
+ ring->next_to_clean = 0;
+}
+
+static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
+{
+ ring_pair_cb->q.handle = NULL;
+
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
+}
+
+static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+{
+ int comm_index = rcb_common->comm_index;
+ int port;
+ int q_num;
+
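+ /* service rings are distributed evenly across the service ports;
+  * each debug rcb common maps to a single port after them
+  */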
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
+ port = ring_idx / q_num;
+ } else {
+ port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
+ }
+
+ return port;
+}
+
+static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
+{
+ int comm_index = rcb_common->comm_index;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ return HNS_SERVICE_RING_IRQ_IDX;
+ else
+ return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2;
+}
+
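+/* each ring pair gets its own register window of HNS_RCB_REG_OFFSET
+ * bytes, starting 0x10000 past the rcb common block
+ */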
+#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
+ ((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
+/**
+ *hns_rcb_get_cfg - get rcb config
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+{
+ struct ring_pair_cb *ring_pair_cb;
+ u32 i;
+ u32 ring_num = rcb_common->ring_num;
+ int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
+ struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
+
+ for (i = 0; i < ring_num; i++) {
+ ring_pair_cb = &rcb_common->ring_pair_cb[i];
+ ring_pair_cb->rcb_common = rcb_common;
+ ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
+ ring_pair_cb->index = i;
+ ring_pair_cb->q.io_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
+ ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
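+ /* every ring pair consumes two interrupt lines: tx first,
+  * then rx
+  */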
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX]
+ = irq_of_parse_and_map(np, base_irq_idx + i * 2);
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX]
+ = irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1);
+ ring_pair_cb->q.phy_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
+ hns_rcb_ring_pair_get_cfg(ring_pair_cb);
+ }
+}
+
+/**
+ *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *return coalesced_frames
+ */
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+{
+ int comm_index = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+ return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+}
+
+/**
+ *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
+ *@dsaf_dev: dsaf device
+ *@comm_index: rcb common device index
+ *return coalesce timeout
+ */
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+ return rcb_comm->timeout;
+}
+
+/**
+ *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *@timeout: coalesce timeout for the rx interrupt
+ */
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+ int port, u32 timeout)
+{
+ int comm_index = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+ if (rcb_comm->timeout == timeout)
+ return;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ dev_err(dsaf_dev->dev,
+ "error: not support coalesce_usecs setting!\n");
+ return;
+ }
+ rcb_comm->timeout = timeout;
+ hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+}
+
+/**
+ *hns_rcb_set_coalesced_frames - set rcb coalesced frames
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *@coalesced_frames: frame count threshold for interrupt coalescing
+ *Return 0 on success, negative on failure
+ */
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+ int port, u32 coalesced_frames)
+{
+ int comm_index = hns_dsaf_get_comm_idx_by_port(port);
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+ u32 coalesced_reg_val;
+ int ret;
+
+ coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+
+ if (coalesced_reg_val == coalesced_frames)
+ return 0;
+
+ if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
+ ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
+ coalesced_frames);
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+}
+
+/**
+ *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
+ * according to dsaf mode
+ *@dsaf_mode: dsaf mode
+ *@comm_index: rcb common device index
+ *@max_vfn: max vfn number
+ *@max_q_per_vf: max ring number per vm
+ */
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+ u16 *max_vfn, u16 *max_q_per_vf)
+{
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ switch (dsaf_mode) {
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ *max_vfn = 1;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ *max_vfn = 64;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ *max_vfn = 16;
+ *max_q_per_vf = 1;
+ break;
+ default:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ }
+ } else {
+ *max_vfn = 1;
+ *max_q_per_vf = 1;
+ }
+}
+
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ switch (dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ return 1;
+
+ case DSAF_MODE_DISABLE_FIX:
+ return 6;
+
+ case DSAF_MODE_ENABLE_0VM:
+ return 32;
+
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_ENABLE_8VM:
+ return 96;
+
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_ENABLE_128VM:
+ return 128;
+
+ default:
+ dev_warn(dsaf_dev->dev,
+ "get ring num fail,use default!dsaf_mode=%d\n",
+ dsaf_dev->dsaf_mode);
+ return 128;
+ }
+ } else {
+ return 1;
+ }
+}
+
+void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ void __iomem *base_addr;
+
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
+ else
+ base_addr = dsaf_dev->sds_base
+ + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+ + RCB_COMMON_REG_OFFSET;
+
+ return base_addr;
+}
+
+static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ struct device_node *np = dsaf_dev->dev->of_node;
+ phys_addr_t phy_addr;
+ const __be32 *tmp_addr;
+ u64 addr_offset = 0;
+ u64 size = 0;
+ int index = 0;
+
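+ /* pick the "reg" entry covering this rcb common block: entry 2
+  * for the service block, entry 1 for the debug blocks
+  */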
+ if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ index = 2;
+ addr_offset = RCB_COMMON_REG_OFFSET;
+ } else {
+ index = 1;
+ addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
+ RCB_COMMON_REG_OFFSET;
+ }
+ tmp_addr = of_get_address(np, index, &size, NULL);
+ phy_addr = of_translate_address(np, tmp_addr);
+ return phy_addr + addr_offset;
+}
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ struct rcb_common_cb *rcb_common;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+ int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+
+ rcb_common =
+ devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
+ ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
+ if (!rcb_common) {
+ dev_err(dsaf_dev->dev, "rcb common devm_kzalloc failed!\n");
+ return -ENOMEM;
+ }
+ rcb_common->comm_index = comm_index;
+ rcb_common->ring_num = ring_num;
+ rcb_common->dsaf_dev = dsaf_dev;
+
+ rcb_common->desc_num = dsaf_dev->desc_num;
+ rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
+ rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
+
+ hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+ rcb_common->max_vfn = max_vfn;
+ rcb_common->max_q_per_vf = max_q_per_vf;
+
+ rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
+ rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+
+ dsaf_dev->rcb_common[comm_index] = rcb_common;
+ return 0;
+}
+
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
+ u32 comm_index)
+{
+ dsaf_dev->rcb_common[comm_index] = NULL;
+}
+
+void hns_rcb_update_stats(struct hnae_queue *queue)
+{
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
+ struct ppe_common_cb *ppe_common
+ = dsaf_dev->ppe_common[ring->rcb_common->comm_index];
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
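+ /* the pktnum record counters appear to be clear-on-write: each is
+  * accumulated into the soft stats and then reset by writing 1
+  */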
+ hw_stats->rx_pkts += dsaf_read_dev(queue,
+ RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);
+
+ hw_stats->tx_pkts += dsaf_read_dev(queue,
+ RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
+}
+
+/**
+ *hns_rcb_get_stats - get rcb statistics
+ *@queue: rcb queue
+ *@data: statistics buffer
+ */
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
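+ /* the 28 entries filled in below must stay in step with the names
+  * emitted by hns_rcb_get_strings() (HNS_RING_STATIC_REG_NUM)
+  */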
+ regs_buff[0] = hw_stats->tx_pkts;
+ regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
+ regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
+ regs_buff[3] =
+ dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+
+ regs_buff[4] = queue->tx_ring.stats.tx_pkts;
+ regs_buff[5] = queue->tx_ring.stats.tx_bytes;
+ regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
+ regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
+ regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
+ regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
+ regs_buff[10] = queue->tx_ring.stats.restart_queue;
+ regs_buff[11] = queue->tx_ring.stats.tx_busy;
+
+ regs_buff[12] = hw_stats->rx_pkts;
+ regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
+ regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
+ regs_buff[15] =
+ dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+
+ regs_buff[16] = queue->rx_ring.stats.rx_pkts;
+ regs_buff[17] = queue->rx_ring.stats.rx_bytes;
+ regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
+ regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
+ regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
+ regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
+ regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
+ regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
+ regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
+ regs_buff[25] = queue->rx_ring.stats.err_bd_num;
+ regs_buff[26] = queue->rx_ring.stats.l2_err;
+ regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
+}
+
+/**
+ *hns_rcb_get_ring_sset_count - rcb string set count
+ *@stringset: ethtool string set id
+ *return rcb ring string set count
+ */
+int hns_rcb_get_ring_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return HNS_RING_STATIC_REG_NUM;
+
+ return 0;
+}
+
+/**
+ *hns_rcb_get_common_regs_count - rcb common regs count
+ *return regs count
+ */
+int hns_rcb_get_common_regs_count(void)
+{
+ return HNS_RCB_COMMON_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_ring_regs_count - rcb ring regs count
+ *return regs count
+ */
+int hns_rcb_get_ring_regs_count(void)
+{
+ return HNS_RCB_RING_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_strings - get rcb string set
+ *@stringset: string set index
+ *@data: strings name buffer
+ *@index: queue index
+ */
+void hns_rcb_get_strings(int stringset, u8 *data, int index)
+{
+ char *buff = (char *)data;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
+}
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
+{
+ u32 *regs = data;
+ u32 i = 0;
+
+ /*rcb common registers */
+ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
+ regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
+ regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);
+
+ regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
+ regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
+ regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
+ regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
+ regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
+ regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);
+
+ regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
+ regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
+ regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
+ regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
+ regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
+ regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
+ regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
+ regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
+ regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
+ regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
+ regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
+ regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
+ regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
+ regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
+ regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
+ regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
+ regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
+ regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
+ regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);
+
+ regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
+ regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
+ regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
+ regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
+ regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
+ regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
+ regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
+ regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
+ regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
+ regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);
+
+ /* rcb common entry registers */
+ for (i = 0; i < 16; i++) { /* 16 bd_num/pktline entries each */
+ regs[38 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
+ regs[54 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
+ }
+
+ regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
+ regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+ regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+
+ /* mark end of rcb common regs */
+ for (i = 73; i < 80; i++)
+ regs[i] = 0xcccccccc;
+}
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
+{
+ u32 *regs = data;
+ struct ring_pair_cb *ring_pair
+ = container_of(queue, struct ring_pair_cb, q);
+ u32 i = 0;
+
+ /*rcb ring registers */
+ regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
+ regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
+ regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
+ regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
+ regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
+ regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
+ regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
+ regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+ regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+
+ regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
+ regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
+ regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
+ regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
+ regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
+ regs[14] = 0; /* slot 14 is unused; zero it to keep the dump deterministic */
+ regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
+ regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
+ regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+ regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
+ regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+
+ regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
+ regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
+ regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
+ regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
+ regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
+ regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
+ regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);
+
+ regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
+ regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
+ regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
+ regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
+ regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
+ regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
+ regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
+ regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);
+
+ /* mark end of ring regs */
+ for (i = 35; i < 40; i++)
+ regs[i] = 0xcccccc00 + ring_pair->index;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
new file mode 100644
index 000000000000..3a2afe2dd8bb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_RCB_H
+#define _HNS_DSAF_RCB_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "hnae.h"
+#include "hns_dsaf_main.h"
+
+struct rcb_common_cb;
+
+#define HNS_RCB_IRQ_NUM_PER_QUEUE 2
+#define HNS_RCB_IRQ_IDX_TX 0
+#define HNS_RCB_IRQ_IDX_RX 1
+#define HNS_RCB_TX_REG_OFFSET 0x40
+
+#define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_RCB_DEBUG_NW_ENGINE_NUM 1
+#define HNS_RCB_RING_MAX_BD_PER_PKT 3
+#define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU
+
+#define HNS_RCB_RING_MAX_PENDING_BD 1024
+#define HNS_RCB_RING_MIN_PENDING_BD 16
+
+#define HNS_RCB_REG_OFFSET 0x10000
+
+#define HNS_RCB_MAX_COALESCED_FRAMES 1023
+#define HNS_RCB_MIN_COALESCED_FRAMES 1
+#define HNS_RCB_DEF_COALESCED_FRAMES 50
+#define HNS_RCB_MAX_TIME_OUT 0x500
+
+#define HNS_RCB_COMMON_ENDIAN 1
+
+#define HNS_BD_SIZE_512_TYPE 0
+#define HNS_BD_SIZE_1024_TYPE 1
+#define HNS_BD_SIZE_2048_TYPE 2
+#define HNS_BD_SIZE_4096_TYPE 3
+
+#define HNS_RCB_COMMON_DUMP_REG_NUM 80
+#define HNS_RCB_RING_DUMP_REG_NUM 40
+#define HNS_RING_STATIC_REG_NUM 28
+
+#define HNS_DUMP_REG_NUM 500
+#define HNS_STATIC_REG_NUM 12
+
+enum rcb_int_flag {
+ RCB_INT_FLAG_TX = 0x1,
+ RCB_INT_FLAG_RX = (0x1 << 1),
+ RCB_INT_FLAG_MAX = (0x1 << 2), /* must be the last element */
+};
+
+struct hns_ring_hw_stats {
+ u64 tx_pkts;
+ u64 ppe_tx_ok_pkts;
+ u64 ppe_tx_drop_pkts;
+ u64 rx_pkts;
+ u64 ppe_rx_ok_pkts;
+ u64 ppe_rx_drop_pkts;
+};
+
+struct ring_pair_cb {
+ struct rcb_common_cb *rcb_common; /* rcb common device this ring belongs to */
+ struct device *dev; /* device for DMA mapping */
+ struct hnae_queue q;
+
+ u16 index; /* global index in a rcb common device */
+ u16 buf_size;
+
+ int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
+
+ u8 port_id_in_dsa;
+ u8 used_by_vf;
+
+ struct hns_ring_hw_stats hw_stats;
+};
+
+struct rcb_common_cb {
+ u8 __iomem *io_base;
+ phys_addr_t phy_base;
+ struct dsaf_device *dsaf_dev;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+
+ u8 comm_index;
+ u32 ring_num;
+ u32 coalesced_frames; /* frames threshold of rx interrupt */
+ u32 timeout; /* time threshold of rx interrupt */
+ u32 desc_num; /* desc num per queue*/
+
+ struct ring_pair_cb ring_pair_cb[0];
+};
+
+int hns_rcb_buf_size2type(u32 buf_size);
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_start(struct hnae_queue *q, u32 val);
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+ u16 *max_vfn, u16 *max_q_per_vf);
+
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask);
+void hns_rcb_init_hw(struct ring_pair_cb *ring);
+void hns_rcb_reset_ring_hw(struct hnae_queue *q);
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port);
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+ int port, u32 timeout);
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+ int port, u32 coalesced_frames);
+void hns_rcb_update_stats(struct hnae_queue *queue);
+
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_common, void *data);
+
+int hns_rcb_get_ring_sset_count(int stringset);
+int hns_rcb_get_common_regs_count(void);
+int hns_rcb_get_ring_regs_count(void);
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
+
+void hns_rcb_get_strings(int stringset, u8 *data, int index);
+#endif /* _HNS_DSAF_RCB_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
new file mode 100644
index 000000000000..b475e1bf2e6f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -0,0 +1,972 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DSAF_REG_H_
+#define _DSAF_REG_H_
+
+#define HNS_GE_FIFO_ERR_INTNUM 8
+#define HNS_XGE_ERR_INTNUM 6
+#define HNS_RCB_COMM_ERR_INTNUM 12
+#define HNS_PPE_TNL_ERR_INTNUM 8
+#define HNS_DSAF_EVENT_INTNUM 21
+#define HNS_DEBUG_RING_INTNUM 4
+#define HNS_SERVICE_RING_INTNUM 256
+
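+/* ring interrupt indexes are laid out after all of the error and
+ * event interrupts counted above
+ */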
+#define HNS_DEBUG_RING_IRQ_IDX (HNS_GE_FIFO_ERR_INTNUM + HNS_XGE_ERR_INTNUM +\
+ HNS_RCB_COMM_ERR_INTNUM + HNS_PPE_TNL_ERR_INTNUM +\
+ HNS_DSAF_EVENT_INTNUM)
+#define HNS_SERVICE_RING_IRQ_IDX (HNS_DEBUG_RING_IRQ_IDX +\
+ HNS_DEBUG_RING_INTNUM)
+
+#define DSAF_IRQ_NUM 18
+
+#define DSAF_MAX_PORT_NUM_PER_CHIP 8
+#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 3
+#define DSAF_PPE_INODE_BASE 6
+#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#define DSAF_DEBUG_NW_NUM 2
+#define DSAF_SERVICE_NW_NUM 6
+#define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM
+#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM
+#define DSAF_NODE_NUM 18
+#define DSAF_XOD_BIG_NUM DSAF_NODE_NUM
+#define DSAF_SBM_NUM DSAF_NODE_NUM
+#define DSAF_VOQ_NUM DSAF_NODE_NUM
+#define DSAF_INODE_NUM DSAF_NODE_NUM
+#define DSAF_XOD_NUM 8
+#define DSAF_TBL_NUM 8
+#define DSAF_SW_PORT_NUM 8
+#define DSAF_TOTAL_QUEUE_NUM 129
+
+#define DSAF_TCAM_SUM 512
+#define DSAF_LINE_SUM (2048 * 14)
+
+#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194
+#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300
+#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304
+#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308
+#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C
+#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310
+#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314
+#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318
+#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C
+#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320
+#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354
+#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00
+#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04
+#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08
+#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C
+#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10
+#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14
+#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18
+#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C
+#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20
+#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24
+#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48
+#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C
+#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060
+#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300
+#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300
+#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304
+#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308
+#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C
+#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310
+#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314
+#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328
+#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00
+#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04
+#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08
+#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C
+#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10
+#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44
+
+/* serdes offsets */
+#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
+#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
+#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
+#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
+#define HNS_MAC_LANE3_CTLEDFE_REG 0x000BFF9CULL
+#define HNS_MAC_LANE0_STATE_REG 0x000BFFD4ULL
+#define HNS_MAC_LANE1_STATE_REG 0x000BFFC4ULL
+#define HNS_MAC_LANE2_STATE_REG 0x000BFFB4ULL
+#define HNS_MAC_LANE3_STATE_REG 0x000BFFA4ULL
+
+#define HILINK_RESET_TIMOUT 10000
+
+#define DSAF_SRAM_INIT_OVER_0_REG 0x0
+#define DSAF_CFG_0_REG 0x4
+#define DSAF_ECC_ERR_INVERT_0_REG 0x8
+#define DSAF_ABNORMAL_TIMEOUT_0_REG 0x1C
+#define DSAF_FSM_TIMEOUT_0_REG 0x20
+#define DSAF_DSA_REG_CNT_CLR_CE_REG 0x2C
+#define DSAF_DSA_SBM_INF_FIFO_THRD_REG 0x30
+#define DSAF_DSA_SRAM_1BIT_ECC_SEL_REG 0x34
+#define DSAF_DSA_SRAM_1BIT_ECC_CNT_REG 0x38
+#define DSAF_PFC_EN_0_REG 0x50
+#define DSAF_PFC_UNIT_CNT_0_REG 0x70
+#define DSAF_XGE_INT_MSK_0_REG 0x100
+#define DSAF_PPE_INT_MSK_0_REG 0x120
+#define DSAF_ROCEE_INT_MSK_0_REG 0x140
+#define DSAF_XGE_INT_SRC_0_REG 0x160
+#define DSAF_PPE_INT_SRC_0_REG 0x180
+#define DSAF_ROCEE_INT_SRC_0_REG 0x1A0
+#define DSAF_XGE_INT_STS_0_REG 0x1C0
+#define DSAF_PPE_INT_STS_0_REG 0x1E0
+#define DSAF_ROCEE_INT_STS_0_REG 0x200
+#define DSAF_PPE_QID_CFG_0_REG 0x300
+#define DSAF_SW_PORT_TYPE_0_REG 0x320
+#define DSAF_STP_PORT_TYPE_0_REG 0x340
+#define DSAF_MIX_DEF_QID_0_REG 0x360
+#define DSAF_PORT_DEF_VLAN_0_REG 0x380
+#define DSAF_VM_DEF_VLAN_0_REG 0x400
+
+#define DSAF_INODE_CUT_THROUGH_CFG_0_REG 0x1000
+#define DSAF_INODE_ECC_INVERT_EN_0_REG 0x1008
+#define DSAF_INODE_ECC_ERR_ADDR_0_REG 0x100C
+#define DSAF_INODE_IN_PORT_NUM_0_REG 0x1018
+#define DSAF_INODE_PRI_TC_CFG_0_REG 0x101C
+#define DSAF_INODE_BP_STATUS_0_REG 0x1020
+#define DSAF_INODE_PAD_DISCARD_NUM_0_REG 0x1028
+#define DSAF_INODE_FINAL_IN_MAN_NUM_0_REG 0x102C
+#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
+#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
+#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
+#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
+#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
+#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
+#define DSAF_INODE_BP_DISCARD_NUM_0_REG 0x1058
+#define DSAF_INODE_RSLT_DISCARD_NUM_0_REG 0x105C
+#define DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG 0x1060
+#define DSAF_INODE_VOQ_OVER_NUM_0_REG 0x1068
+#define DSAF_INODE_BD_SAVE_STATUS_0_REG 0x1900
+#define DSAF_INODE_BD_ORDER_STATUS_0_REG 0x1950
+#define DSAF_INODE_SW_VLAN_TAG_DISC_0_REG 0x1A00
+#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
+#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
+#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
+
+#define DSAF_SBM_CFG_REG_0_REG 0x2000
+#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
+#define DSAF_SBM_BP_CFG_0_PPE_REG_0_REG 0x2304
+#define DSAF_SBM_BP_CFG_0_ROCEE_REG_0_REG 0x2604
+#define DSAF_SBM_BP_CFG_1_REG_0_REG 0x2008
+#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
+#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
+#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
+#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
+#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
+#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
+#define DSAF_SBM_BP_CNT_1_0_REG 0x201C
+#define DSAF_SBM_BP_CNT_2_0_REG 0x2020
+#define DSAF_SBM_BP_CNT_3_0_REG 0x2024
+#define DSAF_SBM_INER_ST_0_REG 0x2028
+#define DSAF_SBM_MIB_REQ_FAILED_TC_0_REG 0x202C
+#define DSAF_SBM_LNK_INPORT_CNT_0_REG 0x2030
+#define DSAF_SBM_LNK_DROP_CNT_0_REG 0x2034
+#define DSAF_SBM_INF_OUTPORT_CNT_0_REG 0x2038
+#define DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG 0x203C
+#define DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG 0x2040
+#define DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG 0x2044
+#define DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG 0x2048
+#define DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG 0x204C
+#define DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG 0x2050
+#define DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG 0x2054
+#define DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG 0x2058
+#define DSAF_SBM_LNK_REQ_CNT_0_REG 0x205C
+#define DSAF_SBM_LNK_RELS_CNT_0_REG 0x2060
+#define DSAF_SBM_BP_CFG_3_REG_0_REG 0x2068
+#define DSAF_SBM_BP_CFG_4_REG_0_REG 0x206C
+
+#define DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG 0x3000
+#define DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG 0x3004
+#define DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG 0x3008
+#define DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG 0x300C
+#define DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG 0x3010
+#define DSAF_XOD_ETS_TOKEN_CFG_0_REG 0x3014
+#define DSAF_XOD_PFS_CFG_0_0_REG 0x3018
+#define DSAF_XOD_PFS_CFG_1_0_REG 0x301C
+#define DSAF_XOD_PFS_CFG_2_0_REG 0x3020
+#define DSAF_XOD_GNT_L_0_REG 0x3024
+#define DSAF_XOD_GNT_H_0_REG 0x3028
+#define DSAF_XOD_CONNECT_STATE_0_REG 0x302C
+#define DSAF_XOD_RCVPKT_CNT_0_REG 0x3030
+#define DSAF_XOD_RCVTC0_CNT_0_REG 0x3034
+#define DSAF_XOD_RCVTC1_CNT_0_REG 0x3038
+#define DSAF_XOD_RCVTC2_CNT_0_REG 0x303C
+#define DSAF_XOD_RCVTC3_CNT_0_REG 0x3040
+#define DSAF_XOD_RCVVC0_CNT_0_REG 0x3044
+#define DSAF_XOD_RCVVC1_CNT_0_REG 0x3048
+#define DSAF_XOD_XGE_RCVIN0_CNT_0_REG 0x304C
+#define DSAF_XOD_XGE_RCVIN1_CNT_0_REG 0x3050
+#define DSAF_XOD_XGE_RCVIN2_CNT_0_REG 0x3054
+#define DSAF_XOD_XGE_RCVIN3_CNT_0_REG 0x3058
+#define DSAF_XOD_XGE_RCVIN4_CNT_0_REG 0x305C
+#define DSAF_XOD_XGE_RCVIN5_CNT_0_REG 0x3060
+#define DSAF_XOD_XGE_RCVIN6_CNT_0_REG 0x3064
+#define DSAF_XOD_XGE_RCVIN7_CNT_0_REG 0x3068
+#define DSAF_XOD_PPE_RCVIN0_CNT_0_REG 0x306C
+#define DSAF_XOD_PPE_RCVIN1_CNT_0_REG 0x3070
+#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
+#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
+#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
+
+#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
+#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
+#define DSAF_VOQ_IN_PKT_NUM_0_REG 0x400C
+#define DSAF_VOQ_OUT_PKT_NUM_0_REG 0x4010
+#define DSAF_VOQ_ECC_ERR_ADDR_0_REG 0x4014
+#define DSAF_VOQ_BP_STATUS_0_REG 0x4018
+#define DSAF_VOQ_SPUP_IDLE_0_REG 0x401C
+#define DSAF_VOQ_XGE_XOD_REQ_0_0_REG 0x4024
+#define DSAF_VOQ_XGE_XOD_REQ_1_0_REG 0x4028
+#define DSAF_VOQ_PPE_XOD_REQ_0_REG 0x402C
+#define DSAF_VOQ_ROCEE_XOD_REQ_0_REG 0x4030
+#define DSAF_VOQ_BP_ALL_THRD_0_REG 0x4034
+
+#define DSAF_TBL_CTRL_0_REG 0x5000
+#define DSAF_TBL_INT_MSK_0_REG 0x5004
+#define DSAF_TBL_INT_SRC_0_REG 0x5008
+#define DSAF_TBL_INT_STS_0_REG 0x5100
+#define DSAF_TBL_TCAM_ADDR_0_REG 0x500C
+#define DSAF_TBL_LINE_ADDR_0_REG 0x5010
+#define DSAF_TBL_TCAM_HIGH_0_REG 0x5014
+#define DSAF_TBL_TCAM_LOW_0_REG 0x5018
+#define DSAF_TBL_TCAM_MCAST_CFG_4_0_REG 0x501C
+#define DSAF_TBL_TCAM_MCAST_CFG_3_0_REG 0x5020
+#define DSAF_TBL_TCAM_MCAST_CFG_2_0_REG 0x5024
+#define DSAF_TBL_TCAM_MCAST_CFG_1_0_REG 0x5028
+#define DSAF_TBL_TCAM_MCAST_CFG_0_0_REG 0x502C
+#define DSAF_TBL_TCAM_UCAST_CFG_0_REG 0x5030
+#define DSAF_TBL_LIN_CFG_0_REG 0x5034
+#define DSAF_TBL_TCAM_RDATA_HIGH_0_REG 0x5038
+#define DSAF_TBL_TCAM_RDATA_LOW_0_REG 0x503C
+#define DSAF_TBL_TCAM_RAM_RDATA4_0_REG 0x5040
+#define DSAF_TBL_TCAM_RAM_RDATA3_0_REG 0x5044
+#define DSAF_TBL_TCAM_RAM_RDATA2_0_REG 0x5048
+#define DSAF_TBL_TCAM_RAM_RDATA1_0_REG 0x504C
+#define DSAF_TBL_TCAM_RAM_RDATA0_0_REG 0x5050
+#define DSAF_TBL_LIN_RDATA_0_REG 0x5054
+#define DSAF_TBL_DA0_MIS_INFO1_0_REG 0x5058
+#define DSAF_TBL_DA0_MIS_INFO0_0_REG 0x505C
+#define DSAF_TBL_SA_MIS_INFO2_0_REG 0x5104
+#define DSAF_TBL_SA_MIS_INFO1_0_REG 0x5098
+#define DSAF_TBL_SA_MIS_INFO0_0_REG 0x509C
+#define DSAF_TBL_PUL_0_REG 0x50A0
+#define DSAF_TBL_OLD_RSLT_0_REG 0x50A4
+#define DSAF_TBL_OLD_SCAN_VAL_0_REG 0x50A8
+#define DSAF_TBL_DFX_CTRL_0_REG 0x50AC
+#define DSAF_TBL_DFX_STAT_0_REG 0x50B0
+#define DSAF_TBL_DFX_STAT_2_0_REG 0x5108
+#define DSAF_TBL_LKUP_NUM_I_0_REG 0x50C0
+#define DSAF_TBL_LKUP_NUM_O_0_REG 0x50E0
+#define DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG 0x510C
+
+#define DSAF_INODE_FIFO_WL_0_REG 0x6000
+#define DSAF_ONODE_FIFO_WL_0_REG 0x6020
+#define DSAF_XGE_GE_WORK_MODE_0_REG 0x6040
+#define DSAF_XGE_APP_RX_LINK_UP_0_REG 0x6080
+#define DSAF_NETPORT_CTRL_SIG_0_REG 0x60A0
+#define DSAF_XGE_CTRL_SIG_CFG_0_REG 0x60C0
+
+#define PPE_COM_CFG_QID_MODE_REG 0x0
+#define PPE_COM_INTEN_REG 0x110
+#define PPE_COM_RINT_REG 0x114
+#define PPE_COM_INTSTS_REG 0x118
+#define PPE_COM_COMMON_CNT_CLR_CE_REG 0x1120
+#define PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG 0x300
+#define PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG 0x600
+#define PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG 0x900
+#define PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG 0xC00
+
+#define PPE_CFG_TX_FIFO_THRSLD_REG 0x0
+#define PPE_CFG_RX_FIFO_THRSLD_REG 0x4
+#define PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG 0x8
+#define PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG 0xC
+#define PPE_CFG_PAUSE_IDLE_CNT_REG 0x10
+#define PPE_CFG_BUS_CTRL_REG 0x40
+#define PPE_CFG_TNL_TO_BE_RST_REG 0x48
+#define PPE_CURR_TNL_CAN_RST_REG 0x4C
+#define PPE_CFG_XGE_MODE_REG 0x80
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x84
+#define PPE_CFG_RX_PKT_MODE_REG 0x88
+#define PPE_CFG_RX_VLAN_TAG_REG 0x8C
+#define PPE_CFG_TAG_GEN_REG 0x90
+#define PPE_CFG_PARSE_TAG_REG 0x94
+#define PPE_CFG_PRO_CHECK_EN_REG 0x98
+#define PPE_INTEN_REG 0x100
+#define PPE_RINT_REG 0x104
+#define PPE_INTSTS_REG 0x108
+#define PPE_CFG_RX_PKT_INT_REG 0x140
+#define PPE_CFG_HEAT_DECT_TIME0_REG 0x144
+#define PPE_CFG_HEAT_DECT_TIME1_REG 0x148
+#define PPE_HIS_RX_SW_PKT_CNT_REG 0x200
+#define PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG 0x204
+#define PPE_HIS_RX_PKT_NO_BUF_CNT_REG 0x208
+#define PPE_HIS_TX_BD_CNT_REG 0x20C
+#define PPE_HIS_TX_PKT_CNT_REG 0x210
+#define PPE_HIS_TX_PKT_OK_CNT_REG 0x214
+#define PPE_HIS_TX_PKT_EPT_CNT_REG 0x218
+#define PPE_HIS_TX_PKT_CS_FAIL_CNT_REG 0x21C
+#define PPE_HIS_RX_APP_BUF_FAIL_CNT_REG 0x220
+#define PPE_HIS_RX_APP_BUF_WAIT_CNT_REG 0x224
+#define PPE_HIS_RX_PKT_DROP_FUL_CNT_REG 0x228
+#define PPE_HIS_RX_PKT_DROP_PRT_CNT_REG 0x22C
+#define PPE_TNL_0_5_CNT_CLR_CE_REG 0x300
+#define PPE_CFG_AXI_DBG_REG 0x304
+#define PPE_HIS_PRO_ERR_REG 0x308
+#define PPE_HIS_TNL_FIFO_ERR_REG 0x30C
+#define PPE_CURR_CFF_DATA_NUM_REG 0x310
+#define PPE_CURR_RX_ST_REG 0x314
+#define PPE_CURR_TX_ST_REG 0x318
+#define PPE_CURR_RX_FIFO0_REG 0x31C
+#define PPE_CURR_RX_FIFO1_REG 0x320
+#define PPE_CURR_TX_FIFO0_REG 0x324
+#define PPE_CURR_TX_FIFO1_REG 0x328
+#define PPE_ECO0_REG 0x32C
+#define PPE_ECO1_REG 0x330
+#define PPE_ECO2_REG 0x334
+
+#define RCB_COM_CFG_ENDIAN_REG 0x0
+#define RCB_COM_CFG_SYS_FSH_REG 0xC
+#define RCB_COM_CFG_INIT_FLAG_REG 0x10
+#define RCB_COM_CFG_PKT_REG 0x30
+#define RCB_COM_CFG_RINVLD_REG 0x34
+#define RCB_COM_CFG_FNA_REG 0x38
+#define RCB_COM_CFG_FA_REG 0x3C
+#define RCB_COM_CFG_PKT_TC_BP_REG 0x40
+#define RCB_COM_CFG_PPE_TNL_CLKEN_REG 0x44
+
+#define RCB_COM_INTMSK_TX_PKT_REG 0x3A0
+#define RCB_COM_RINT_TX_PKT_REG 0x3A8
+#define RCB_COM_INTMASK_ECC_ERR_REG 0x400
+#define RCB_COM_INTSTS_ECC_ERR_REG 0x408
+#define RCB_COM_EBD_SRAM_ERR_REG 0x410
+#define RCB_COM_RXRING_ERR_REG 0x41C
+#define RCB_COM_TXRING_ERR_REG 0x420
+#define RCB_COM_TX_FBD_ERR_REG 0x424
+#define RCB_SRAM_ECC_CHK_EN_REG 0x428
+#define RCB_SRAM_ECC_CHK0_REG 0x42C
+#define RCB_SRAM_ECC_CHK1_REG 0x430
+#define RCB_SRAM_ECC_CHK2_REG 0x434
+#define RCB_SRAM_ECC_CHK3_REG 0x438
+#define RCB_SRAM_ECC_CHK4_REG 0x43c
+#define RCB_SRAM_ECC_CHK5_REG 0x440
+#define RCB_ECC_ERR_ADDR0_REG 0x450
+#define RCB_ECC_ERR_ADDR3_REG 0x45C
+#define RCB_ECC_ERR_ADDR4_REG 0x460
+#define RCB_ECC_ERR_ADDR5_REG 0x464
+
+#define RCB_COM_SF_CFG_INTMASK_RING 0x480
+#define RCB_COM_SF_CFG_RING_STS 0x484
+#define RCB_COM_SF_CFG_RING 0x488
+#define RCB_COM_SF_CFG_INTMASK_BD 0x48C
+#define RCB_COM_SF_CFG_BD_RINT_STS 0x470
+#define RCB_COM_RCB_RD_BD_BUSY 0x490
+#define RCB_COM_RCB_FBD_CRT_EN 0x494
+#define RCB_COM_AXI_WR_ERR_INTMASK 0x498
+#define RCB_COM_AXI_ERR_STS 0x49C
+#define RCB_COM_CHK_TX_FBD_NUM_REG 0x4a0
+
+#define RCB_CFG_BD_NUM_REG 0x9000
+#define RCB_CFG_PKTLINE_REG 0x9050
+
+#define RCB_CFG_OVERTIME_REG 0x9300
+#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
+#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+
+#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
+#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
+#define RCB_RING_RX_RING_BD_NUM_REG 0x00008
+#define RCB_RING_RX_RING_BD_LEN_REG 0x0000C
+#define RCB_RING_RX_RING_PKTLINE_REG 0x00010
+#define RCB_RING_RX_RING_TAIL_REG 0x00018
+#define RCB_RING_RX_RING_HEAD_REG 0x0001C
+#define RCB_RING_RX_RING_FBDNUM_REG 0x00020
+#define RCB_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C
+
+#define RCB_RING_TX_RING_BASEADDR_L_REG 0x00040
+#define RCB_RING_TX_RING_BASEADDR_H_REG 0x00044
+#define RCB_RING_TX_RING_BD_NUM_REG 0x00048
+#define RCB_RING_TX_RING_BD_LEN_REG 0x0004C
+#define RCB_RING_TX_RING_PKTLINE_REG 0x00050
+#define RCB_RING_TX_RING_TAIL_REG 0x00058
+#define RCB_RING_TX_RING_HEAD_REG 0x0005C
+#define RCB_RING_TX_RING_FBDNUM_REG 0x00060
+#define RCB_RING_TX_RING_OFFSET_REG 0x00064
+#define RCB_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+
+#define RCB_RING_PREFETCH_EN_REG 0x0007C
+#define RCB_RING_CFG_VF_NUM_REG 0x00080
+#define RCB_RING_ASID_REG 0x0008C
+#define RCB_RING_RX_VM_REG 0x00090
+#define RCB_RING_T0_BE_RST 0x00094
+#define RCB_RING_COULD_BE_RST 0x00098
+#define RCB_RING_WRR_WEIGHT_REG 0x0009c
+
+#define RCB_RING_INTMSK_RXWL_REG 0x000A0
+#define RCB_RING_INTSTS_RX_RING_REG 0x000A4
+#define RCB_RING_INTMSK_TXWL_REG 0x000AC
+#define RCB_RING_INTSTS_TX_RING_REG 0x000B0
+#define RCB_RING_INTMSK_RX_OVERTIME_REG 0x000B8
+#define RCB_RING_INTSTS_RX_OVERTIME_REG 0x000BC
+#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
+#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
+
+#define GMAC_DUPLEX_TYPE_REG 0x0008UL
+#define GMAC_FD_FC_TYPE_REG 0x000CUL
+#define GMAC_FC_TX_TIMER_REG 0x001CUL
+#define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL
+#define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL
+#define GMAC_IPG_TX_TIMER_REG 0x0030UL
+#define GMAC_PAUSE_THR_REG 0x0038UL
+#define GMAC_MAX_FRM_SIZE_REG 0x003CUL
+#define GMAC_PORT_MODE_REG 0x0040UL
+#define GMAC_PORT_EN_REG 0x0044UL
+#define GMAC_PAUSE_EN_REG 0x0048UL
+#define GMAC_SHORT_RUNTS_THR_REG 0x0050UL
+#define GMAC_AN_NEG_STATE_REG 0x0058UL
+#define GMAC_TX_LOCAL_PAGE_REG 0x005CUL
+#define GMAC_TRANSMIT_CONTROL_REG 0x0060UL
+#define GMAC_REC_FILT_CONTROL_REG 0x0064UL
+#define GMAC_PTP_CONFIG_REG 0x0074UL
+
+#define GMAC_RX_OCTETS_TOTAL_OK_REG 0x0080UL
+#define GMAC_RX_OCTETS_BAD_REG 0x0084UL
+#define GMAC_RX_UC_PKTS_REG 0x0088UL
+#define GMAC_RX_MC_PKTS_REG 0x008CUL
+#define GMAC_RX_BC_PKTS_REG 0x0090UL
+#define GMAC_RX_PKTS_64OCTETS_REG 0x0094UL
+#define GMAC_RX_PKTS_65TO127OCTETS_REG 0x0098UL
+#define GMAC_RX_PKTS_128TO255OCTETS_REG 0x009CUL
+#define GMAC_RX_PKTS_255TO511OCTETS_REG 0x00A0UL
+#define GMAC_RX_PKTS_512TO1023OCTETS_REG 0x00A4UL
+#define GMAC_RX_PKTS_1024TO1518OCTETS_REG 0x00A8UL
+#define GMAC_RX_PKTS_1519TOMAXOCTETS_REG 0x00ACUL
+#define GMAC_RX_FCS_ERRORS_REG 0x00B0UL
+#define GMAC_RX_TAGGED_REG 0x00B4UL
+#define GMAC_RX_DATA_ERR_REG 0x00B8UL
+#define GMAC_RX_ALIGN_ERRORS_REG 0x00BCUL
+#define GMAC_RX_LONG_ERRORS_REG 0x00C0UL
+#define GMAC_RX_JABBER_ERRORS_REG 0x00C4UL
+#define GMAC_RX_PAUSE_MACCTRL_FRAM_REG 0x00C8UL
+#define GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG 0x00CCUL
+#define GMAC_RX_VERY_LONG_ERR_CNT_REG 0x00D0UL
+#define GMAC_RX_RUNT_ERR_CNT_REG 0x00D4UL
+#define GMAC_RX_SHORT_ERR_CNT_REG 0x00D8UL
+#define GMAC_RX_FILT_PKT_CNT_REG 0x00E8UL
+#define GMAC_RX_OCTETS_TOTAL_FILT_REG 0x00ECUL
+#define GMAC_OCTETS_TRANSMITTED_OK_REG 0x0100UL
+#define GMAC_OCTETS_TRANSMITTED_BAD_REG 0x0104UL
+#define GMAC_TX_UC_PKTS_REG 0x0108UL
+#define GMAC_TX_MC_PKTS_REG 0x010CUL
+#define GMAC_TX_BC_PKTS_REG 0x0110UL
+#define GMAC_TX_PKTS_64OCTETS_REG 0x0114UL
+#define GMAC_TX_PKTS_65TO127OCTETS_REG 0x0118UL
+#define GMAC_TX_PKTS_128TO255OCTETS_REG 0x011CUL
+#define GMAC_TX_PKTS_255TO511OCTETS_REG 0x0120UL
+#define GMAC_TX_PKTS_512TO1023OCTETS_REG 0x0124UL
+#define GMAC_TX_PKTS_1024TO1518OCTETS_REG 0x0128UL
+#define GMAC_TX_PKTS_1519TOMAXOCTETS_REG 0x012CUL
+#define GMAC_TX_EXCESSIVE_LENGTH_DROP_REG 0x014CUL
+#define GMAC_TX_UNDERRUN_REG 0x0150UL
+#define GMAC_TX_TAGGED_REG 0x0154UL
+#define GMAC_TX_CRC_ERROR_REG 0x0158UL
+#define GMAC_TX_PAUSE_FRAMES_REG 0x015CUL
+#define GAMC_RX_MAX_FRAME 0x0170UL
+#define GMAC_LINE_LOOP_BACK_REG 0x01A8UL
+#define GMAC_CF_CRC_STRIP_REG 0x01B0UL
+#define GMAC_MODE_CHANGE_EN_REG 0x01B4UL
+#define GMAC_SIXTEEN_BIT_CNTR_REG 0x01CCUL
+#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
+#define GMAC_LOOP_REG 0x01DCUL
+#define GMAC_RECV_CONTROL_REG 0x01E0UL
+#define GMAC_VLAN_CODE_REG 0x01E8UL
+#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
+#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
+#define GMAC_RX_FAIL_COMMA_CNT_REG 0x01F8UL
+#define GMAC_STATION_ADDR_LOW_0_REG 0x0200UL
+#define GMAC_STATION_ADDR_HIGH_0_REG 0x0204UL
+#define GMAC_STATION_ADDR_LOW_1_REG 0x0208UL
+#define GMAC_STATION_ADDR_HIGH_1_REG 0x020CUL
+#define GMAC_STATION_ADDR_LOW_2_REG 0x0210UL
+#define GMAC_STATION_ADDR_HIGH_2_REG 0x0214UL
+#define GMAC_STATION_ADDR_LOW_3_REG 0x0218UL
+#define GMAC_STATION_ADDR_HIGH_3_REG 0x021CUL
+#define GMAC_STATION_ADDR_LOW_4_REG 0x0220UL
+#define GMAC_STATION_ADDR_HIGH_4_REG 0x0224UL
+#define GMAC_STATION_ADDR_LOW_5_REG 0x0228UL
+#define GMAC_STATION_ADDR_HIGH_5_REG 0x022CUL
+#define GMAC_STATION_ADDR_LOW_MSK_0_REG 0x0230UL
+#define GMAC_STATION_ADDR_HIGH_MSK_0_REG 0x0234UL
+#define GMAC_STATION_ADDR_LOW_MSK_1_REG 0x0238UL
+#define GMAC_STATION_ADDR_HIGH_MSK_1_REG 0x023CUL
+#define GMAC_MAC_SKIP_LEN_REG 0x0240UL
+#define GMAC_TX_LOOP_PKT_PRI_REG 0x0378UL
+
+#define XGMAC_INT_STATUS_REG 0x0
+#define XGMAC_INT_ENABLE_REG 0x4
+#define XGMAC_INT_SET_REG 0x8
+#define XGMAC_IERR_U_INFO_REG 0xC
+#define XGMAC_OVF_INFO_REG 0x10
+#define XGMAC_OVF_CNT_REG 0x14
+#define XGMAC_PORT_MODE_REG 0x40
+#define XGMAC_CLK_ENABLE_REG 0x44
+#define XGMAC_RESET_REG 0x48
+#define XGMAC_LINK_CONTROL_REG 0x50
+#define XGMAC_LINK_STATUS_REG 0x54
+#define XGMAC_SPARE_REG 0xC0
+#define XGMAC_SPARE_CNT_REG 0xC4
+
+#define XGMAC_MAC_ENABLE_REG 0x100
+#define XGMAC_MAC_CONTROL_REG 0x104
+#define XGMAC_MAC_IPG_REG 0x120
+#define XGMAC_MAC_MSG_CRC_EN_REG 0x124
+#define XGMAC_MAC_MSG_IMG_REG 0x128
+#define XGMAC_MAC_MSG_FC_CFG_REG 0x12C
+#define XGMAC_MAC_MSG_TC_CFG_REG 0x130
+#define XGMAC_MAC_PAD_SIZE_REG 0x134
+#define XGMAC_MAC_MIN_PKT_SIZE_REG 0x138
+#define XGMAC_MAC_MAX_PKT_SIZE_REG 0x13C
+#define XGMAC_MAC_PAUSE_CTRL_REG 0x160
+#define XGMAC_MAC_PAUSE_TIME_REG 0x164
+#define XGMAC_MAC_PAUSE_GAP_REG 0x168
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG 0x16C
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG 0x170
+#define XGMAC_MAC_PAUSE_PEER_MAC_H_REG 0x174
+#define XGMAC_MAC_PAUSE_PEER_MAC_L_REG 0x178
+#define XGMAC_MAC_PFC_PRI_EN_REG 0x17C
+#define XGMAC_MAC_1588_CTRL_REG 0x180
+#define XGMAC_MAC_1588_TX_PORT_DLY_REG 0x184
+#define XGMAC_MAC_1588_RX_PORT_DLY_REG 0x188
+#define XGMAC_MAC_1588_ASYM_DLY_REG 0x18C
+#define XGMAC_MAC_1588_ADJUST_CFG_REG 0x190
+#define XGMAC_MAC_Y1731_ETH_TYPE_REG 0x194
+#define XGMAC_MAC_MIB_CONTROL_REG 0x198
+#define XGMAC_MAC_WAN_RATE_ADJUST_REG 0x19C
+#define XGMAC_MAC_TX_ERR_MARK_REG 0x1A0
+#define XGMAC_MAC_TX_LF_RF_CONTROL_REG 0x1A4
+#define XGMAC_MAC_RX_LF_RF_STATUS_REG 0x1A8
+#define XGMAC_MAC_TX_RUNT_PKT_CNT_REG 0x1C0
+#define XGMAC_MAC_RX_RUNT_PKT_CNT_REG 0x1C4
+#define XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG 0x1C8
+#define XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG 0x1CC
+#define XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG 0x1D0
+#define XGMAC_MAC_RX_ERR_MSG_CNT_REG 0x1D4
+#define XGMAC_MAC_RX_ERR_EFD_CNT_REG 0x1D8
+#define XGMAC_MAC_ERR_INFO_REG 0x1DC
+#define XGMAC_MAC_DBG_INFO_REG 0x1E0
+
+#define XGMAC_PCS_BASER_SYNC_THD_REG 0x330
+#define XGMAC_PCS_STATUS1_REG 0x404
+#define XGMAC_PCS_BASER_STATUS1_REG 0x410
+#define XGMAC_PCS_BASER_STATUS2_REG 0x414
+#define XGMAC_PCS_BASER_SEEDA_0_REG 0x420
+#define XGMAC_PCS_BASER_SEEDA_1_REG 0x424
+#define XGMAC_PCS_BASER_SEEDB_0_REG 0x428
+#define XGMAC_PCS_BASER_SEEDB_1_REG 0x42C
+#define XGMAC_PCS_BASER_TEST_CONTROL_REG 0x430
+#define XGMAC_PCS_BASER_TEST_ERR_CNT_REG 0x434
+#define XGMAC_PCS_DBG_INFO_REG 0x4C0
+#define XGMAC_PCS_DBG_INFO1_REG 0x4C4
+#define XGMAC_PCS_DBG_INFO2_REG 0x4C8
+#define XGMAC_PCS_DBG_INFO3_REG 0x4CC
+
+#define XGMAC_PMA_ENABLE_REG 0x700
+#define XGMAC_PMA_CONTROL_REG 0x704
+#define XGMAC_PMA_SIGNAL_STATUS_REG 0x708
+#define XGMAC_PMA_DBG_INFO_REG 0x70C
+#define XGMAC_PMA_FEC_ABILITY_REG 0x740
+#define XGMAC_PMA_FEC_CONTROL_REG 0x744
+#define XGMAC_PMA_FEC_CORR_BLOCK_CNT_REG	0x750
+#define XGMAC_PMA_FEC_UNCORR_BLOCK_CNT_REG	0x760
+
+#define XGMAC_TX_PKTS_FRAGMENT 0x0000
+#define XGMAC_TX_PKTS_UNDERSIZE 0x0008
+#define XGMAC_TX_PKTS_UNDERMIN 0x0010
+#define XGMAC_TX_PKTS_64OCTETS 0x0018
+#define XGMAC_TX_PKTS_65TO127OCTETS 0x0020
+#define XGMAC_TX_PKTS_128TO255OCTETS 0x0028
+#define XGMAC_TX_PKTS_256TO511OCTETS 0x0030
+#define XGMAC_TX_PKTS_512TO1023OCTETS 0x0038
+#define XGMAC_TX_PKTS_1024TO1518OCTETS 0x0040
+#define XGMAC_TX_PKTS_1519TOMAXOCTETS 0x0048
+#define XGMAC_TX_PKTS_1519TOMAXOCTETSOK 0x0050
+#define XGMAC_TX_PKTS_OVERSIZE 0x0058
+#define XGMAC_TX_PKTS_JABBER 0x0060
+#define XGMAC_TX_GOODPKTS 0x0068
+#define XGMAC_TX_GOODOCTETS 0x0070
+#define XGMAC_TX_TOTAL_PKTS 0x0078
+#define XGMAC_TX_TOTALOCTETS 0x0080
+#define XGMAC_TX_UNICASTPKTS 0x0088
+#define XGMAC_TX_MULTICASTPKTS 0x0090
+#define XGMAC_TX_BROADCASTPKTS 0x0098
+#define XGMAC_TX_PRI0PAUSEPKTS 0x00a0
+#define XGMAC_TX_PRI1PAUSEPKTS 0x00a8
+#define XGMAC_TX_PRI2PAUSEPKTS 0x00b0
+#define XGMAC_TX_PRI3PAUSEPKTS 0x00b8
+#define XGMAC_TX_PRI4PAUSEPKTS 0x00c0
+#define XGMAC_TX_PRI5PAUSEPKTS 0x00c8
+#define XGMAC_TX_PRI6PAUSEPKTS 0x00d0
+#define XGMAC_TX_PRI7PAUSEPKTS 0x00d8
+#define XGMAC_TX_MACCTRLPKTS 0x00e0
+#define XGMAC_TX_1731PKTS 0x00e8
+#define XGMAC_TX_1588PKTS 0x00f0
+#define XGMAC_RX_FROMAPPGOODPKTS 0x00f8
+#define XGMAC_RX_FROMAPPBADPKTS 0x0100
+#define XGMAC_TX_ERRALLPKTS 0x0108
+
+#define XGMAC_RX_PKTS_FRAGMENT 0x0110
+#define XGMAC_RX_PKTSUNDERSIZE 0x0118
+#define XGMAC_RX_PKTS_UNDERMIN 0x0120
+#define XGMAC_RX_PKTS_64OCTETS 0x0128
+#define XGMAC_RX_PKTS_65TO127OCTETS 0x0130
+#define XGMAC_RX_PKTS_128TO255OCTETS 0x0138
+#define XGMAC_RX_PKTS_256TO511OCTETS 0x0140
+#define XGMAC_RX_PKTS_512TO1023OCTETS 0x0148
+#define XGMAC_RX_PKTS_1024TO1518OCTETS 0x0150
+#define XGMAC_RX_PKTS_1519TOMAXOCTETS 0x0158
+#define XGMAC_RX_PKTS_1519TOMAXOCTETSOK 0x0160
+#define XGMAC_RX_PKTS_OVERSIZE 0x0168
+#define XGMAC_RX_PKTS_JABBER 0x0170
+#define XGMAC_RX_GOODPKTS 0x0178
+#define XGMAC_RX_GOODOCTETS 0x0180
+#define XGMAC_RX_TOTAL_PKTS 0x0188
+#define XGMAC_RX_TOTALOCTETS 0x0190
+#define XGMAC_RX_UNICASTPKTS 0x0198
+#define XGMAC_RX_MULTICASTPKTS 0x01a0
+#define XGMAC_RX_BROADCASTPKTS 0x01a8
+#define XGMAC_RX_PRI0PAUSEPKTS 0x01b0
+#define XGMAC_RX_PRI1PAUSEPKTS 0x01b8
+#define XGMAC_RX_PRI2PAUSEPKTS 0x01c0
+#define XGMAC_RX_PRI3PAUSEPKTS 0x01c8
+#define XGMAC_RX_PRI4PAUSEPKTS 0x01d0
+#define XGMAC_RX_PRI5PAUSEPKTS 0x01d8
+#define XGMAC_RX_PRI6PAUSEPKTS 0x01e0
+#define XGMAC_RX_PRI7PAUSEPKTS 0x01e8
+#define XGMAC_RX_MACCTRLPKTS 0x01f0
+#define XGMAC_TX_SENDAPPGOODPKTS 0x01f8
+#define XGMAC_TX_SENDAPPBADPKTS 0x0200
+#define XGMAC_RX_1731PKTS 0x0208
+#define XGMAC_RX_SYMBOLERRPKTS 0x0210
+#define XGMAC_RX_FCSERRPKTS 0x0218
+
+#define XGMAC_TRX_CORE_SRST_M 0x2080
+
+#define DSAF_CFG_EN_S 0
+#define DSAF_CFG_TC_MODE_S 1
+#define DSAF_CFG_CRC_EN_S 2
+#define DSAF_CFG_SBM_INIT_S 3
+#define DSAF_CFG_MIX_MODE_S 4
+#define DSAF_CFG_STP_MODE_S 5
+#define DSAF_CFG_LOCA_ADDR_EN_S 6
+
+#define DSAF_CNT_CLR_CE_S 0
+#define DSAF_SNAP_EN_S 1
+
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_XGE 41
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000 410
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_2500 103
+
+#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
+#define DSAF_PFC_UNINT_CNT_S 0
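+
+/*
+ * Naming convention (editor's gloss, not part of the register map
+ * itself): each field is described by a bit shift ("_S") and a mask
+ * ("_M") that is already shifted into place, so a field is extracted
+ * as (val & MASK) >> SHIFT.  For example, reading the 9-bit PFC unit
+ * count out of a register value with the helper defined at the bottom
+ * of this header:
+ *
+ *	cnt = dsaf_get_field(val, DSAF_PFC_UNINT_CNT_M,
+ *			     DSAF_PFC_UNINT_CNT_S);
+ */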
+
+#define DSAF_PPE_QID_CFG_M 0xFF
+#define DSAF_PPE_QID_CFG_S 0
+
+#define DSAF_SW_PORT_TYPE_M 3
+#define DSAF_SW_PORT_TYPE_S 0
+
+#define DSAF_STP_PORT_TYPE_M 7
+#define DSAF_STP_PORT_TYPE_S 0
+
+#define DSAF_INODE_IN_PORT_NUM_M 7
+#define DSAF_INODE_IN_PORT_NUM_S 0
+
+#define HNS_DSAF_I4TC_CFG 0x18688688
+#define HNS_DSAF_I8TC_CFG 0x18FAC688
+
+#define DSAF_SBM_CFG_SHCUT_EN_S 0
+#define DSAF_SBM_CFG_EN_S 1
+#define DSAF_SBM_CFG_MIB_EN_S 2
+#define DSAF_SBM_CFG_ECC_INVERT_EN_S 3
+
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S 20
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 11) - 1) << 20)
+
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAF_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_S 10
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_TBL_TCAM_ADDR_S 0
+#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
+
+#define DSAF_TBL_LINE_ADDR_S 0
+#define DSAF_TBL_LINE_ADDR_M ((1ULL << 15) - 1)
+
+#define DSAF_TBL_MCAST_CFG4_VM128_112_S 0
+#define DSAF_TBL_MCAST_CFG4_VM128_112_M (((1ULL << 7) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG4_ITEM_VLD_S 7
+#define DSAF_TBL_MCAST_CFG4_OLD_EN_S 8
+
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_S 0
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_M (((1ULL << 6) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG0_VM25_0_S 6
+#define DSAF_TBL_MCAST_CFG0_VM25_0_M (((1ULL << 26) - 1) << 6)
+
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_S 0
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_UCAST_CFG1_DVC_S 8
+#define DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S 9
+#define DSAF_TBL_UCAST_CFG1_ITEM_VLD_S 10
+#define DSAF_TBL_UCAST_CFG1_OLD_EN_S 11
+
+#define DSAF_TBL_LINE_CFG_OUT_PORT_S 0
+#define DSAF_TBL_LINE_CFG_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_LINE_CFG_DVC_S 8
+#define DSAF_TBL_LINE_CFG_MAC_DISCARD_S 9
+
+#define DSAF_TBL_PUL_OLD_RSLT_RE_S 0
+#define DSAF_TBL_PUL_MCAST_VLD_S 1
+#define DSAF_TBL_PUL_TCAM_DATA_VLD_S 2
+#define DSAF_TBL_PUL_UCAST_VLD_S 3
+#define DSAF_TBL_PUL_LINE_VLD_S 4
+#define DSAF_TBL_PUL_TCAM_LOAD_S 5
+#define DSAF_TBL_PUL_LINE_LOAD_S 6
+
+#define DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S 0
+#define DSAF_TBL_DFX_UC_LKUP_NUM_EN_S 1
+#define DSAF_TBL_DFX_MC_LKUP_NUM_EN_S 2
+#define DSAF_TBL_DFX_BC_LKUP_NUM_EN_S 3
+#define DSAF_TBL_DFX_RAM_ERR_INJECT_EN_S 4
+
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_S 0
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_M (((1ULL << 10) - 1) << 0)
+#define DSAF_VOQ_BP_ALL_UPTHRD_S 10
+#define DSAF_VOQ_BP_ALL_UPTHRD_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_XGE_GE_WORK_MODE_S 0
+#define DSAF_XGE_GE_LOOPBACK_S 1
+
+#define DSAF_FC_XGE_TX_PAUSE_S 0
+#define DSAF_REGS_XGE_CNT_CAR_S 1
+
+#define PPE_CFG_QID_MODE_DEF_QID_S 0
+#define PPE_CFG_QID_MODE_DEF_QID_M (0xff << PPE_CFG_QID_MODE_DEF_QID_S)
+
+#define PPE_CFG_QID_MODE_CF_QID_MODE_S 8
+#define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S)
+
+#define PPE_CNT_CLR_CE_B 0
+#define PPE_CNT_CLR_SNAP_EN_B 1
+
+#define PPE_COMMON_CNT_CLR_CE_B 0
+#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
+
+#define GMAC_DUPLEX_TYPE_B 0
+
+#define GMAC_FC_TX_TIMER_S 0
+#define GMAC_FC_TX_TIMER_M 0xffff
+
+#define GMAC_MAX_FRM_SIZE_S 0
+#define GMAC_MAX_FRM_SIZE_M 0xffff
+
+#define GMAC_PORT_MODE_S 0
+#define GMAC_PORT_MODE_M 0xf
+
+#define GMAC_RGMII_1000M_DELAY_B 4
+#define GMAC_MII_TX_EDGE_SEL_B 5
+#define GMAC_FIFO_ERR_AUTO_RST_B 6
+#define GMAC_DBG_CLK_LOS_MSK_B 7
+
+#define GMAC_PORT_RX_EN_B 1
+#define GMAC_PORT_TX_EN_B 2
+
+#define GMAC_PAUSE_EN_RX_FDFC_B 0
+#define GMAC_PAUSE_EN_TX_FDFC_B 1
+#define GMAC_PAUSE_EN_TX_HDFC_B 2
+
+#define GMAC_SHORT_RUNTS_THR_S 0
+#define GMAC_SHORT_RUNTS_THR_M 0x1f
+
+#define GMAC_AN_NEG_STAT_FD_B 5
+#define GMAC_AN_NEG_STAT_HD_B 6
+#define GMAC_AN_NEG_STAT_RF1_DUPLIEX_B 12
+#define GMAC_AN_NEG_STAT_RF2_B 13
+
+#define GMAC_AN_NEG_STAT_NP_LNK_OK_B 15
+#define GMAC_AN_NEG_STAT_RX_SYNC_OK_B 20
+#define GMAC_AN_NEG_STAT_AN_DONE_B 21
+
+#define GMAC_AN_NEG_STAT_PS_S 7
+#define GMAC_AN_NEG_STAT_PS_M (0x3 << GMAC_AN_NEG_STAT_PS_S)
+
+#define GMAC_AN_NEG_STAT_SPEED_S 10
+#define GMAC_AN_NEG_STAT_SPEED_M (0x3 << GMAC_AN_NEG_STAT_SPEED_S)
+
+#define GMAC_TX_AN_EN_B 5
+#define GMAC_TX_CRC_ADD_B 6
+#define GMAC_TX_PAD_EN_B 7
+
+#define GMAC_LINE_LOOPBACK_B 0
+
+#define GMAC_LP_REG_CF_EXT_DRV_LP_B 1
+#define GMAC_LP_REG_CF2MI_LP_EN_B 2
+
+#define GMAC_MODE_CHANGE_EB_B 0
+
+#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3
+#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4
+
+#define GMAC_TX_LOOP_PKT_HIG_PRI_B 0
+#define GMAC_TX_LOOP_PKT_EN_B 1
+
+#define XGMAC_PORT_MODE_TX_S 0x0
+#define XGMAC_PORT_MODE_TX_M (0x3 << XGMAC_PORT_MODE_TX_S)
+#define XGMAC_PORT_MODE_TX_40G_B 0x3
+#define XGMAC_PORT_MODE_RX_S 0x4
+#define XGMAC_PORT_MODE_RX_M (0x3 << XGMAC_PORT_MODE_RX_S)
+#define XGMAC_PORT_MODE_RX_40G_B 0x7
+
+#define XGMAC_ENABLE_TX_B 0
+#define XGMAC_ENABLE_RX_B 1
+
+#define XGMAC_CTL_TX_FCS_B 0
+#define XGMAC_CTL_TX_PAD_B 1
+#define XGMAC_CTL_TX_PREAMBLE_TRANS_B 3
+#define XGMAC_CTL_TX_UNDER_MIN_ERR_B 4
+#define XGMAC_CTL_TX_TRUNCATE_B 5
+#define XGMAC_CTL_TX_1588_B 8
+#define XGMAC_CTL_TX_1731_B 9
+#define XGMAC_CTL_TX_PFC_B 10
+#define XGMAC_CTL_RX_FCS_B 16
+#define XGMAC_CTL_RX_FCS_STRIP_B 17
+#define XGMAC_CTL_RX_PREAMBLE_TRANS_B 19
+#define XGMAC_CTL_RX_UNDER_MIN_ERR_B 20
+#define XGMAC_CTL_RX_TRUNCATE_B 21
+#define XGMAC_CTL_RX_1588_B 24
+#define XGMAC_CTL_RX_1731_B 25
+#define XGMAC_CTL_RX_PFC_B 26
+
+#define XGMAC_PMA_FEC_CTL_TX_B 0
+#define XGMAC_PMA_FEC_CTL_RX_B 1
+#define XGMAC_PMA_FEC_CTL_ERR_EN 2
+#define XGMAC_PMA_FEC_CTL_ERR_SH 3
+
+#define XGMAC_PAUSE_CTL_TX_B 0
+#define XGMAC_PAUSE_CTL_RX_B 1
+#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
+#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
+
+static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+ writel(value, reg_addr + reg);
+}
+
+#define dsaf_write_dev(a, reg, value) \
+ dsaf_write_reg((a)->io_base, (reg), (value))
+
+static inline u32 dsaf_read_reg(void *base, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define dsaf_read_dev(a, reg) \
+ dsaf_read_reg((a)->io_base, (reg))
+
+#define dsaf_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= (((val) << (shift)) & (mask)); \
+ } while (0)
+
+#define dsaf_set_bit(origin, shift, val) \
+ dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
+
+static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+ u32 val)
+{
+ u32 origin = dsaf_read_reg(base, reg);
+
+ dsaf_set_field(origin, mask, shift, val);
+ dsaf_write_reg(base, reg, origin);
+}
+
+#define dsaf_set_dev_field(dev, reg, mask, shift, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (mask), (shift), (val))
+
+#define dsaf_set_dev_bit(dev, reg, bit, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit), (val))
+
+#define dsaf_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define dsaf_get_bit(origin, shift) \
+ dsaf_get_field((origin), (1ull << (shift)), (shift))
+
+static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+ u32 origin;
+
+ origin = dsaf_read_reg(base, reg);
+ return dsaf_get_field(origin, mask, shift);
+}
+
+#define dsaf_get_dev_field(dev, reg, mask, shift) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (mask), (shift))
+
+#define dsaf_get_dev_bit(dev, reg, bit) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
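+
+/*
+ * Illustrative use of the accessors above (editor's sketch; "drv" is
+ * any structure with an io_base member, which is all the dev variants
+ * assume):
+ *
+ *	dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, 1);
+ *	tx_on = dsaf_get_dev_bit(drv, XGMAC_MAC_ENABLE_REG,
+ *				 XGMAC_ENABLE_TX_B);
+ */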
+
+#define dsaf_write_b(addr, data)\
+	writeb((data), (unsigned char __iomem *)(addr))
+#define dsaf_read_b(addr)\
+	readb((unsigned char __iomem *)(addr))
+
+#define hns_mac_reg_read64(drv, offset) \
+	readq((void __iomem *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
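+
+/*
+ * The 64-bit XGMAC MIB counters sit in a window at io_base + 0xc00,
+ * which is why the XGMAC_TX_ and XGMAC_RX_ statistic offsets above
+ * step by 8 bytes.  Illustrative read (editor's sketch):
+ *
+ *	u64 good = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ */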
+
+#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
new file mode 100644
index 000000000000..802d55457f19
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -0,0 +1,837 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_xgmac.h"
+#include "hns_dsaf_reg.h"
+
+static const struct mac_stats_string g_xgmac_stats_string[] = {
+ {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)},
+ {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)},
+ {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)},
+ {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"xgmac_tx_good_pkts_1519tomax",
+ MAC_STATS_FIELD_OFF(tx_1519tomax_good)},
+ {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)},
+ {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)},
+ {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)},
+ {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)},
+ {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)},
+ {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)},
+ {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)},
+ {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)},
+ {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)},
+ {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)},
+ {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)},
+ {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)},
+ {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)},
+ {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)},
+ {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)},
+ {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)},
+ {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
+ {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
+
+ {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+ {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)},
+ {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)},
+ {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"xgmac_rx_bad_pkt_untramax", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)},
+ {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)},
+ {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)},
+ {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+ {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)},
+ {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)},
+ {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)},
+ {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)},
+ {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)},
+ {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)},
+ {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)},
+ {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)},
+ {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)},
+ {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)},
+ {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)},
+ {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)}
+};
+
+/**
+ *hns_xgmac_tx_enable - xgmac port tx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value);
+}
+
+/**
+ *hns_xgmac_rx_enable - xgmac port rx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value);
+}
+
+/**
+ *hns_xgmac_enable - enable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1);
+ mdelay(10);
+
+	/* enable XGE RX/TX */
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ hns_xgmac_rx_enable(drv, 1);
+ } else {
+		dev_err(drv->dev, "unsupported mac mode:%d\n", mode);
+ }
+}
+
+/**
+ *hns_xgmac_disable - disable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ hns_xgmac_rx_enable(drv, 0);
+ }
+
+ mdelay(10);
+ hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0);
+}
+
+/**
+ *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable
+ *@drv: mac driver
+ *@tx_value: tx value
+ *@rx_value: rx value
+ */
+static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value,
+ u32 rx_value)
+{
+ u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value);
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value);
+ dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin);
+}
+
+/* clear, then enable or disable, the exception irqs for the xge port */
+static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
+{
+	u32 clr_value = 0xfffffffful;
+	u32 msk_value = en ? 0xfffffffful : 0; /* 1 enables, 0 disables */
+
+	dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_value);
+	dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_value);
+}
+
+/**
+ *hns_xgmac_init - initialize XGE
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_init(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0);
+ mdelay(100);
+ hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1);
+
+ mdelay(100);
+ hns_xgmac_exc_irq_en(drv, 0);
+
+ hns_xgmac_pma_fec_enable(drv, 0x0, 0x0);
+
+ hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+}
+
+/**
+ *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable at the same time
+ *@mac_drv: mac driver
+ *@newval:enable of pad and crc
+ */
+static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval);
+ dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin);
+}
+
+/**
+ *hns_xgmac_pausefrm_cfg - set pause param about xgmac
+ *@mac_drv: mac driver
+ *@rx_en:enable rx pause
+ *@tx_en:enable tx pause
+ */
+static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en);
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
+}
+
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val);
+}
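+
+/*
+ * Byte-order sketch (editor's example with a hypothetical address):
+ * for mac_addr = 00:11:22:33:44:55 the function above writes
+ * high_val = 0x0011 and low_val = 0x22334455, i.e. mac_addr[0] ends
+ * up as the most significant byte of the pause MAC address.
+ */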
+
+/**
+ *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac
+ *@mac_drv: mac driver
+ *@enable:enable rx pause param
+ */
+static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+ XGMAC_PAUSE_CTL_RX_B, !!enable);
+}
+
+/**
+ *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac
+ *@mac_drv: mac driver
+ *@enable:pause time; non-zero enables tx pause and sets the pause time
+ */
+static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+ XGMAC_PAUSE_CTL_TX_B, !!enable);
+
+	/* if enable is non-zero, set the tx pause time */
+ if (enable)
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
+}
+
+/**
+ *hns_xgmac_get_id - get xgmac port id
+ *@mac_drv: mac driver
+ *@mac_id:pointer that receives the xgmac port id
+ */
+static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *mac_id = drv->mac_id;
+}
+
+/**
+ *hns_xgmac_config_max_frame_length - set xgmac max frame length
+ *@mac_drv: mac driver
+ *@newval:xgmac max frame length
+ */
+static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
+}
+
+void hns_xgmac_update_stats(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
+
+ /* TX */
+ hw_stats->tx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hw_stats->tx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hw_stats->tx_under_min_pkts
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hw_stats->tx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hw_stats->tx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hw_stats->tx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hw_stats->tx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hw_stats->tx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hw_stats->tx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hw_stats->tx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hw_stats->tx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hw_stats->rx_good_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hw_stats->rx_bad_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+
+ /* RX */
+ hw_stats->rx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hw_stats->rx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hw_stats->rx_under_min
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hw_stats->rx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hw_stats->rx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hw_stats->rx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hw_stats->rx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hw_stats->rx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hw_stats->rx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hw_stats->rx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hw_stats->rx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+
+ hw_stats->rx_unknown_ctrl
+ = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hw_stats->tx_good_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hw_stats->tx_bad_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hw_stats->rx_symbol_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+}
+
+/**
+ *hns_xgmac_free - free xgmac driver
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+/**
+ *hns_xgmac_get_info - get xgmac information
+ *@mac_drv: mac driver
+ *@mac_info:mac information
+ */
+static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_time, pause_ctrl, port_mode, ctrl_val;
+
+ ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B);
+ mac_info->auto_neg = 0;
+
+ pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ mac_info->tx_pause_time = pause_time;
+
+ port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M,
+ XGMAC_PORT_MODE_TX_S) &&
+ dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M,
+ XGMAC_PORT_MODE_RX_S);
+ mac_info->duplex = 1;
+ mac_info->speed = MAC_SPEED_10000;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_pausefrm_cfg - get xgmac pause param
+ *@mac_drv: mac driver
+ *@rx_en:xgmac rx pause enable
+ *@tx_en:xgmac tx pause enable
+ */
+static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_ctrl;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_link_status - get xgmac link status
+ *@mac_drv: mac driver
+ *@link_stat: xgmac link stat
+ */
+static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+}
+
+/**
+ *hns_xgmac_get_regs - dump xgmac regs
+ *@mac_drv: mac driver
+ *@data:buffer for the register values
+ */
+static void hns_xgmac_get_regs(void *mac_drv, void *data)
+{
+ u32 i = 0;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 *regs = data;
+ u64 qtmp;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG);
+ regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG);
+ regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG);
+ regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG);
+ regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG);
+ regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG);
+ regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG);
+ regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG);
+ regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG);
+ regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+
+ regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG);
+ regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG);
+ regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG);
+ regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG);
+ regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG);
+ regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG);
+ regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG);
+ regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG);
+ regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG);
+ regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG);
+ regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG);
+ regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG);
+ regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG);
+ regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG);
+ regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG);
+ regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG);
+ regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG);
+ regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG);
+ regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG);
+ regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG);
+ regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG);
+ regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG);
+
+ regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG);
+ regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG);
+ regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG);
+ regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG);
+ regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG);
+ regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG);
+ regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG);
+ regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG);
+ regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG);
+ regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG);
+ regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG);
+ regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG);
+ regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG);
+ regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG);
+ regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG);
+
+ regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG);
+ regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG);
+ regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG);
+ regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG);
+ regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG);
+ regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG);
+ regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG);
+ regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG);
+ regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG);
+ regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG);
+ regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG);
+ regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG);
+ regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG);
+ regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG);
+
+ regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG);
+ regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG);
+ regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG);
+ regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG);
+ regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+	regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT_REG);
+	regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT_REG);
+
+ /* status registers */
+#define hns_xgmac_cpy_q(p, q) \
+ do {\
+ *(p) = (u32)(q);\
+ *((p) + 1) = (u32)((q) >> 32);\
+ } while (0)
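+	/*
+	 * hns_xgmac_cpy_q() stores one 64-bit counter as two consecutive
+	 * u32 dump words, low half first: for q = 0x1122334455667788 it
+	 * writes 0x55667788 to p[0] and 0x11223344 to p[1] (editor's
+	 * illustration).
+	 */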
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[73], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hns_xgmac_cpy_q(&regs[75], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[77], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[79], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[81], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[83], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[85], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[87], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[89], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[91], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[93], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[95], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[97], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[99], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[101], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[103], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[105], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[107], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[109], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[111], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[113], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[115], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[117], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[119], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[121], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[123], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[125], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[127], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[129], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[131], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hns_xgmac_cpy_q(&regs[133], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[135], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[137], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+ hns_xgmac_cpy_q(&regs[139], qtmp);
+
+ /* RX */
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[141], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hns_xgmac_cpy_q(&regs[143], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[145], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[147], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[149], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[151], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[153], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[155], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[157], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[159], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[161], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[163], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[165], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[167], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[169], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[171], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[173], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[175], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[177], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[179], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[181], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[183], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[185], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[187], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[189], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[191], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[193], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[195], qtmp);
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[197], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[199], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[201], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[203], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hns_xgmac_cpy_q(&regs[205], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+ hns_xgmac_cpy_q(&regs[207], qtmp);
+
+	/* mark end of mac regs (the counters above end at regs[208]) */
+	for (i = 209; i < 214; i++)
+		regs[i] = 0xaaaaaaaa;
+}
+
+/**
+ *hns_xgmac_get_stats - get xgmac statistic
+ *@mac_drv: mac driver
+ *@data:data for value of stats regs
+ */
+static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_xgmac_stats_string[i].offset);
+ }
+}
+
+/**
+ *hns_xgmac_get_strings - get xgmac strings name
+ *@stringset: type of values in data
+ *@data:data for value of string name
+ */
+static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+		snprintf(buff, ETH_GSTRING_LEN, "%s",
+			 g_xgmac_stats_string[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+}
+
+/**
+ *hns_xgmac_get_sset_count - get xgmac string set count
+ *@stringset: type of values in data
+ *return xgmac string set count
+ */
+static int hns_xgmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ARRAY_SIZE(g_xgmac_stats_string);
+
+ return 0;
+}
+
+/**
+ *hns_xgmac_get_regs_count - get xgmac regs count
+ *return xgmac regs count
+ */
+static int hns_xgmac_get_regs_count(void)
+{
+ return ETH_XGMAC_DUMP_NUM;
+}
+
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
+ mac_drv->mac_init = hns_xgmac_init;
+ mac_drv->mac_enable = hns_xgmac_enable;
+ mac_drv->mac_disable = hns_xgmac_disable;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr;
+ mac_drv->set_an_mode = NULL;
+ mac_drv->config_loopback = NULL;
+ mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc;
+ mac_drv->config_half_duplex = NULL;
+ mac_drv->set_rx_ignore_pause_frames =
+ hns_xgmac_set_rx_ignore_pause_frames;
+ mac_drv->mac_get_id = hns_xgmac_get_id;
+ mac_drv->mac_free = hns_xgmac_free;
+ mac_drv->adjust_link = NULL;
+ mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg;
+ mac_drv->autoneg_stat = NULL;
+ mac_drv->get_info = hns_xgmac_get_info;
+ mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_xgmac_get_link_status;
+ mac_drv->get_regs = hns_xgmac_get_regs;
+ mac_drv->get_ethtool_stats = hns_xgmac_get_stats;
+ mac_drv->get_sset_count = hns_xgmac_get_sset_count;
+ mac_drv->get_regs_count = hns_xgmac_get_regs_count;
+ mac_drv->get_strings = hns_xgmac_get_strings;
+ mac_drv->update_stats = hns_xgmac_update_stats;
+
+ return (void *)mac_drv;
+}
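+
+/*
+ * Usage sketch (editor's gloss; "param" is a caller-provided struct
+ * mac_params): hns_xgmac_config() only fills in the ops table, so a
+ * caller is expected to drive the port through the returned ops, e.g.:
+ *
+ *	struct mac_driver *drv = hns_xgmac_config(mac_cb, &param);
+ *
+ *	if (drv)
+ *		drv->mac_init(drv);
+ */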
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
new file mode 100644
index 000000000000..139f7297c7b4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_XGMAC_H
+#define _HNS_XGMAC_H
+
+#define ETH_XGMAC_DUMP_NUM (214)
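+
+/*
+ * Editor's note: the 214 32-bit dump words cover the base, MAC, PCS
+ * and PMA registers read in hns_xgmac_get_regs(), the 64-bit MIB
+ * counters at two words each, and trailing 0xaaaaaaaa end-of-dump
+ * markers.
+ */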
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
new file mode 100644
index 000000000000..08cef0dfb5db
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -0,0 +1,1642 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "hnae.h"
+#include "hns_enet.h"
+
+#define NIC_MAX_Q_PER_VF 16
+#define HNS_NIC_TX_TIMEOUT (5 * HZ)
+
+#define SERVICE_TIMER_HZ (1 * HZ)
+
+#define NIC_TX_CLEAN_MAX_NUM 256
+#define NIC_RX_CLEAN_MAX_NUM 64
+
+#define RCB_IRQ_NOT_INITED 0
+#define RCB_IRQ_INITED 1
+
+static void fill_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type)
+{
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct sk_buff *skb;
+ __be16 protocol;
+ u32 ip_offset;
+ u32 asid_bufnum_pid = 0;
+ u32 flag_ipoffset = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16((u16)size);
+
+	/* mark this BD as valid */
+ flag_ipoffset |= 1 << HNS_TXD_VLD_B;
+
+ asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
+
+ if (type == DESC_TYPE_SKB) {
+ skb = (struct sk_buff *)priv;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ protocol = skb->protocol;
+ ip_offset = ETH_HLEN;
+
+			/* if it is a SW VLAN, check the next protocol */
+ if (protocol == htons(ETH_P_8021Q)) {
+ ip_offset += VLAN_HLEN;
+ protocol = vlan_get_protocol(skb);
+ skb->protocol = protocol;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
+ /* check for tcp/udp header */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+				/* ipv6 has no L3 checksum, check for L4 header */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+ }
+
+ flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
+ }
+ }
+
+ flag_ipoffset |= frag_end << HNS_TXD_FE_B;
+
+ desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
+ desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
+
+ ring_ptr_move_fw(ring, next_to_use);
+}
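+
+/*
+ * Example (editor's gloss): for an skb with a linear header and one
+ * page fragment, hns_nic_net_xmit_hw() below calls fill_desc() twice:
+ * once with DESC_TYPE_SKB and frag_end = 0 for the header, then once
+ * with DESC_TYPE_PAGE and frag_end = 1 for the fragment, both with
+ * buf_num = 2.
+ */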
+
+static void unfill_desc(struct hnae_ring *ring)
+{
+ ring_ptr_move_bw(ring, next_to_use);
+}
+
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct device *dev = priv->dev;
+ struct hnae_ring *ring = ring_data->ring;
+ struct netdev_queue *dev_queue;
+ struct skb_frag_struct *frag;
+ int buf_num;
+ dma_addr_t dma;
+ int size, next_to_use;
+ int i, j;
+ struct sk_buff *new_skb;
+
+ assert(ring->max_desc_num_per_pkt <= ring->desc_num);
+
+	/* no. of buffers: the page fragments plus the linear part */
+ buf_num = skb_shinfo(skb)->nr_frags + 1;
+
+ if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+ if (ring_space(ring) < 1) {
+ ring->stats.tx_busy++;
+ goto out_net_tx_busy;
+ }
+
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb) {
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "no memory to xmit!\n");
+ goto out_err_tx_ok;
+ }
+
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ buf_num = 1;
+ assert(skb_shinfo(skb)->nr_frags == 1);
+ } else if (buf_num > ring_space(ring)) {
+ ring->stats.tx_busy++;
+ goto out_net_tx_busy;
+ }
+ next_to_use = ring->next_to_use;
+
+ /* fill the first part */
+ size = skb_headlen(skb);
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX head DMA map failed\n");
+ ring->stats.sw_err_cnt++;
+ goto out_err_tx_ok;
+ }
+ fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num,
+ DESC_TYPE_SKB);
+
+ /* fill the fragments */
+ for (i = 1; i < buf_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
+ ring->stats.sw_err_cnt++;
+ goto out_map_frag_fail;
+ }
+ fill_desc(ring, skb_frag_page(frag), size, dma,
+ buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE);
+ }
+
+	/* all fragments are filled; account the packet to the tx queue */
+ dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
+ netdev_tx_sent_queue(dev_queue, skb->len);
+
+ wmb(); /* commit all data before submit */
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
+ hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
+ ring->stats.tx_pkts++;
+ ring->stats.tx_bytes += skb->len;
+
+ return NETDEV_TX_OK;
+
+out_map_frag_fail:
+
+ for (j = i - 1; j > 0; j--) {
+ unfill_desc(ring);
+ next_to_use = ring->next_to_use;
+ dma_unmap_page(dev, ring->desc_cb[next_to_use].dma,
+ ring->desc_cb[next_to_use].length,
+ DMA_TO_DEVICE);
+ }
+
+ unfill_desc(ring);
+ next_to_use = ring->next_to_use;
+ dma_unmap_single(dev, ring->desc_cb[next_to_use].dma,
+ ring->desc_cb[next_to_use].length, DMA_TO_DEVICE);
+
+out_err_tx_ok:
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+out_net_tx_busy:
+
+ netif_stop_subqueue(ndev, skb->queue_mapping);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+ return NETDEV_TX_BUSY;
+}
+
+/**
+ * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @flag: rx descriptor flags, used to pick the L3/L4 protocol
+ * @max_size: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation for doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
+ unsigned int max_size)
+{
+ unsigned char *network;
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_size < ETH_HLEN)
+ return max_size;
+
+ /* initialize network frame pointer */
+ network = data;
+
+ /* set first protocol and move network header forward */
+ network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
+ == HNS_RX_FLAG_VLAN_PRESENT) {
+ if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
+ return max_size;
+
+ network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+ == HNS_RX_FLAG_L3ID_IPV4) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct iphdr)))
+ return max_size;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return network - data;
+
+ /* record next protocol if header is present */
+ } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+ == HNS_RX_FLAG_L3ID_IPV6) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct ipv6hdr)))
+ return max_size;
+
+ /* record next protocol */
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return network - data;
+ }
+
+ /* relocate pointer to start of L4 header */
+ network += hlen;
+
+ /* finally sort out TCP/UDP */
+ if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+ == HNS_RX_FLAG_L4ID_TCP) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct tcphdr)))
+ return max_size;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return network - data;
+
+ network += hlen;
+ } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+ == HNS_RX_FLAG_L4ID_UDP) {
+ if ((typeof(max_size))(network - data) >
+ (max_size - sizeof(struct udphdr)))
+ return max_size;
+
+ network += sizeof(struct udphdr);
+ }
+
+ /* If everything has gone correctly network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
+ */
+ if ((typeof(max_size))(network - data) < max_size)
+ return network - data;
+ else
+ return max_size;
+}
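+
+/*
+ * Worked example (editor's sketch): for an untagged IPv4/TCP frame
+ * with no IP or TCP options, and a flag word reporting
+ * HNS_RX_FLAG_L3ID_IPV4 and HNS_RX_FLAG_L4ID_TCP, the walk above
+ * returns ETH_HLEN + 20 + 20 = 54 bytes (assuming max_size is at
+ * least that large).
+ */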
+
+static void
+hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
+{
+	/* avoid reusing pages from a remote NUMA node; reuse_flag defaults to off */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) {
+ /* move offset up to the next cache line */
+ desc_cb->page_offset += tsize;
+
+ if (desc_cb->page_offset <= last_offset) {
+ desc_cb->reuse_flag = 1;
+			/* bump ref count on the page before it is handed back to hardware */
+ get_page(desc_cb->priv);
+ }
+ }
+}
+
+static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
+ struct sk_buff **out_skb, int *out_bnum)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct sk_buff *skb;
+ struct hnae_desc *desc;
+ struct hnae_desc_cb *desc_cb;
+ unsigned char *va;
+ int bnum, length, size, i, truesize, last_offset;
+ int pull_len;
+ u32 bnum_flag;
+
+ last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ length = le16_to_cpu(desc->rx.pkt_len);
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+ bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+ *out_bnum = bnum;
+ va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+
+ skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE);
+ if (unlikely(!skb)) {
+ netdev_err(ndev, "alloc rx skb fail\n");
+ ring->stats.sw_err_cnt++;
+ return -ENOMEM;
+ }
+
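+	/* copy-break: small frames are copied entirely into the skb head
+	 * so the hardware buffer can be recycled at once; larger frames
+	 * only pull the headers and attach the rest as page fragments.
+	 */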
+ if (length <= HNS_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* this page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+
+		if (unlikely(bnum != 1)) { /* descriptor count error */
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ } else {
+ ring->stats.seg_pkt_cnt++;
+
+ pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+ memcpy(__skb_put(skb, pull_len), va,
+ ALIGN(pull_len, sizeof(long)));
+
+ size = le16_to_cpu(desc->rx.size);
+ truesize = ALIGN(size, L1_CACHE_BYTES);
+ skb_add_rx_frag(skb, 0, desc_cb->priv,
+ desc_cb->page_offset + pull_len,
+ size - pull_len, truesize - pull_len);
+
+ hns_nic_reuse_page(desc_cb, truesize, last_offset);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* descriptor count error */
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ for (i = 1; i < bnum; i++) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ size = le16_to_cpu(desc->rx.size);
+ truesize = ALIGN(size, L1_CACHE_BYTES);
+ skb_add_rx_frag(skb, i, desc_cb->priv,
+ desc_cb->page_offset,
+ size, truesize);
+
+ hns_nic_reuse_page(desc_cb, truesize, last_offset);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+ }
+
+	/* exception handling: free the skb and skip past the bad descriptors */
+ if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
+out_bnum_err:
+		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, must not be 0 */
+ netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
+ bnum, ring->max_desc_num_per_pkt,
+ length, (int)MAX_SKB_FRAGS,
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.err_bd_num++;
+ dev_kfree_skb_any(skb);
+ return -EDOM;
+ }
+
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+
+ if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
+ netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.non_vld_descs++;
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (unlikely((!desc->rx.pkt_len) ||
+ hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
+ ring->stats.err_pkt_len++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
+ ring->stats.l2_err++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += skb->len;
+
+ if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
+ hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
+ ring->stats.l3l4_csum_err++;
+ return 0;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return 0;
+}
+
+static void
+hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleaned_count)
+{
+ int i, ret;
+ struct hnae_desc_cb res_cbs;
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+
+	for (i = 0; i < cleaned_count; i++) {
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ if (desc_cb->reuse_flag) {
+ ring->stats.reuse_pg_cnt++;
+ hnae_reuse_buffer(ring, ring->next_to_use);
+ } else {
+ ret = hnae_reserve_buffer_map(ring, &res_cbs);
+ if (ret) {
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "hnae reserve buffer map failed.\n");
+ break;
+ }
+ hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
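+	/* i is the number of buffers actually refilled; on a mapping
+	 * failure the loop breaks early and only that many buffers are
+	 * reported to hardware through the doorbell below.
+	 */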
+	wmb(); /* ensure all descriptor writes complete before ringing the doorbell */
+ writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
+}
+
+/* pass the received skb up to the IP stack */
+static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev = ring_data->napi.dev;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ (void)napi_gro_receive(&ring_data->napi, skb);
+ ndev->last_rx = jiffies;
+}
+
+static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct sk_buff *skb;
+ int num, bnum, ex_num;
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ int recv_pkts, recv_bds, clean_count, err;
+
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+	rmb(); /* make sure num is read before any descriptor data is touched */
+
+	recv_pkts = 0;
+	recv_bds = 0;
+	clean_count = 0;
+recv:
+ while (recv_pkts < budget && recv_bds < num) {
+		/* reuse or reallocate RX buffers */
+ if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ clean_count = 0;
+ }
+
+		/* poll one packet */
+ err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
+ if (unlikely(!skb)) /* this fault cannot be repaired */
+ break;
+
+ recv_bds += bnum;
+ clean_count += bnum;
+		if (unlikely(err)) { /* skip the errored packet */
+ recv_pkts++;
+ continue;
+ }
+
+		/* hand the packet to the ex_process callback passed in via v */
+ ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
+ ring_data, skb);
+ recv_pkts++;
+ }
+
+	/* refill any buffers consumed during this poll */
+ if (clean_count > 0) {
+ hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ clean_count = 0;
+ }
+
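+	/* packets may have arrived while the ring was being processed;
+	 * re-read the pending BD count and keep polling until the budget
+	 * is exhausted or the ring is drained.
+	 */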
+ if (recv_pkts < budget) {
+ ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+		rmb(); /* make sure the BD count is read before rechecking */
+ if (ex_num > 0) {
+ num += ex_num;
+ goto recv;
+ }
+ }
+
+ return recv_pkts;
+}
+
+static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num = 0;
+
+	/* re-check the pending BD count as a hardware bug workaround */
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num > 0) {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ napi_schedule(&ring_data->napi);
+ }
+}
+
+static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
+ int *bytes, int *pkts)
+{
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+	/* desc_cb is cleaned up by hnae_free_buffer_detach() */
+ hnae_free_buffer_detach(ring, ring->next_to_clean);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+}
+
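+/* The TX ring is circular, so a head value reported by hardware is only
+ * valid if it lies in the half-open interval (next_to_clean, next_to_use]
+ * taken modulo the ring size; anything else indicates a hardware error.
+ */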
+static int is_valid_clean_head(struct hnae_ring *ring, int h)
+{
+ int u = ring->next_to_use;
+ int c = ring->next_to_clean;
+
+	if (unlikely(h >= ring->desc_num))
+ return 0;
+
+	assert(u >= 0 && u < ring->desc_num);
+	assert(c >= 0 && c < ring->desc_num);
+	assert(u != c && h != c); /* caller must guarantee this */
+
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
+/* netif_tx_lock degrades performance; take it only when necessary */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
+#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
+#else
+#define NETIF_TX_LOCK(ndev)
+#define NETIF_TX_UNLOCK(ndev)
+#endif
+/* reclaim the transmitted descriptors within one budget;
+ * returns 0 on success or a negative error code
+ */
+static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ int head;
+ int bytes, pkts;
+
+ NETIF_TX_LOCK(ndev);
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ rmb(); /* make sure head is ready before touch any data */
+
+ if (is_ring_empty(ring) || head == ring->next_to_clean) {
+ NETIF_TX_UNLOCK(ndev);
+ return 0; /* no data to poll */
+ }
+
+ if (!is_valid_clean_head(ring, head)) {
+ netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
+ ring->stats.io_err_cnt++;
+ NETIF_TX_UNLOCK(ndev);
+ return -EIO;
+ }
+
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean)
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+ NETIF_TX_UNLOCK(ndev);
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+ if (unlikely(pkts && netif_carrier_ok(ndev) &&
+ (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_tx_queue_stopped(dev_queue) &&
+ !test_bit(NIC_STATE_DOWN, &priv->state)) {
+ netif_tx_wake_queue(dev_queue);
+ ring->stats.restart_queue++;
+ }
+ }
+ return 0;
+}
+
+static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head = ring->next_to_clean;
+
+	/* re-read the head pointer as a hardware bug workaround */
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head != ring->next_to_clean) {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ napi_schedule(&ring_data->napi);
+ }
+}
+
+static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ int head;
+ int bytes, pkts;
+
+ NETIF_TX_LOCK(ndev);
+
+	head = ring->next_to_use; /* ntu: ring position set by software */
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean)
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+ NETIF_TX_UNLOCK(ndev);
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+}
+
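+/* Shared NAPI poll: poll_one points at the TX or RX handler for this
+ * ring; once the budget is not exhausted, the interrupt is re-enabled
+ * and fini_process re-checks the ring to close the race with hardware.
+ */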
+static int hns_nic_common_poll(struct napi_struct *napi, int budget)
+{
+ struct hns_nic_ring_data *ring_data =
+ container_of(napi, struct hns_nic_ring_data, napi);
+ int clean_complete = ring_data->poll_one(
+ ring_data, budget, ring_data->ex_process);
+
+ if (clean_complete >= 0 && clean_complete < budget) {
+ napi_complete(napi);
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 0);
+
+ ring_data->fini_process(ring_data);
+ }
+
+ return clean_complete;
+}
+
+static irqreturn_t hns_irq_handle(int irq, void *dev)
+{
+ struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+ napi_schedule(&ring_data->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * hns_nic_adjust_link - adjust the link mode according to the PHY status
+ * @ndev: net device
+ */
+static void hns_nic_adjust_link(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
+}
+
+/**
+ * hns_nic_init_phy - initialize the PHY
+ * @ndev: net device
+ * @h: ae handle
+ * Return 0 on success, negative on failure
+ */
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy_dev = NULL;
+
+ if (!h->phy_node)
+ return 0;
+
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ phy_dev = of_phy_connect(ndev, h->phy_node,
+ hns_nic_adjust_link, 0, h->phy_if);
+ else
+ phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
+
+ if (unlikely(!phy_dev) || IS_ERR(phy_dev))
+ return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
+
+ phy_dev->supported &= h->if_support;
+ phy_dev->advertising = phy_dev->supported;
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+ phy_dev->autoneg = false;
+
+ priv->phy = phy_dev;
+
+ return 0;
+}
+
+static int hns_nic_ring_open(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ napi_enable(&priv->ring_data[idx].napi);
+
+ enable_irq(priv->ring_data[idx].ring->irq);
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
+
+ return 0;
+}
+
+static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct sockaddr *mac_addr = p;
+ int ret;
+
+ if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
+ if (ret) {
+ netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+
+ return 0;
+}
+
+void hns_nic_update_stats(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+}
+
+/* use the MAC address from the device tree if valid; otherwise fall back to a random one */
+static void hns_init_mac_addr(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct device_node *node = priv->dev->of_node;
+ const void *mac_addr_temp;
+
+ mac_addr_temp = of_get_mac_address(node);
+ if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
+ memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
+ } else {
+ eth_hw_addr_random(ndev);
+ dev_warn(priv->dev, "No valid mac, use random mac %pM",
+ ndev->dev_addr);
+ }
+}
+
+static void hns_nic_ring_close(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
+ disable_irq(priv->ring_data[idx].ring->irq);
+
+ napi_disable(&priv->ring_data[idx].napi);
+}
+
+static int hns_nic_init_irq(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ int i;
+ int ret;
+ int cpu;
+ cpumask_t mask;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+
+ if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
+ break;
+
+ snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
+ "%s-%s%d", priv->netdev->name,
+ (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+
+ rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+
+ ret = request_irq(rd->ring->irq,
+ hns_irq_handle, 0, rd->ring->ring_name, rd);
+ if (ret) {
+ netdev_err(priv->netdev, "request irq(%d) fail\n",
+ rd->ring->irq);
+ return ret;
+ }
+ disable_irq(rd->ring->irq);
+ rd->ring->irq_init_flag = RCB_IRQ_INITED;
+
+		/* pin the ring's IRQ to the CPU matching its queue index */
+ if (cpu_online(rd->queue_index)) {
+ cpumask_clear(&mask);
+ cpu = rd->queue_index;
+ cpumask_set_cpu(cpu, &mask);
+ irq_set_affinity_hint(rd->ring->irq, &mask);
+ }
+ }
+
+ return 0;
+}
+
+static int hns_nic_net_up(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j, k;
+ int ret;
+
+ ret = hns_nic_init_irq(priv);
+ if (ret != 0) {
+ netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ ret = hns_nic_ring_open(ndev, i);
+ if (ret)
+ goto out_has_some_queues;
+ }
+
+ for (k = 0; k < h->q_num; k++)
+ h->dev->ops->toggle_queue_status(h->qs[k], 1);
+
+ ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
+ if (ret)
+ goto out_set_mac_addr_err;
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ goto out_start_err;
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+ clear_bit(NIC_STATE_DOWN, &priv->state);
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ return 0;
+
+out_start_err:
+ netif_stop_queue(ndev);
+out_set_mac_addr_err:
+ for (k = 0; k < h->q_num; k++)
+ h->dev->ops->toggle_queue_status(h->qs[k], 0);
+out_has_some_queues:
+ for (j = i - 1; j >= 0; j--)
+ hns_nic_ring_close(ndev, j);
+
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ return ret;
+}
+
+static void hns_nic_net_down(struct net_device *ndev)
+{
+ int i;
+ struct hnae_ae_ops *ops;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
+ return;
+
+ (void)del_timer_sync(&priv->service_timer);
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+ priv->link = 0;
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->stop)
+ ops->stop(priv->ae_handle);
+
+ netif_tx_stop_all_queues(ndev);
+
+ for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
+ hns_nic_ring_close(ndev, i);
+ hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
+
+ /* clean tx buffers*/
+ hns_nic_tx_clr_all_bufs(priv->ring_data + i);
+ }
+}
+
+void hns_nic_net_reset(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *handle = priv->ae_handle;
+
+ while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
+ usleep_range(1000, 2000);
+
+ (void)hnae_reinit_handle(handle);
+
+ clear_bit(NIC_STATE_RESETTING, &priv->state);
+}
+
+void hns_nic_net_reinit(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ priv->netdev->trans_start = jiffies;
+ while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
+ usleep_range(1000, 2000);
+
+ hns_nic_net_down(netdev);
+ hns_nic_net_reset(netdev);
+ (void)hns_nic_net_up(netdev);
+ clear_bit(NIC_STATE_REINITING, &priv->state);
+}
+
+static int hns_nic_net_open(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ if (test_bit(NIC_STATE_TESTING, &priv->state))
+ return -EBUSY;
+
+ priv->link = 0;
+ netif_carrier_off(ndev);
+
+ ret = netif_set_real_num_tx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
+ ret);
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ ret = hns_nic_net_up(ndev);
+ if (ret) {
+ netdev_err(ndev,
+ "hns net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_nic_net_stop(struct net_device *ndev)
+{
+ hns_nic_net_down(ndev);
+
+ return 0;
+}
+
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+static void hns_nic_net_timeout(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ hns_tx_timeout_reset(priv);
+}
+
+static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy_dev = priv->phy;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!phy_dev)
+ return -ENOTSUPP;
+
+ return phy_mii_ioctl(phy_dev, ifr, cmd);
+}
+
+/* use only for netconsole to poll with the device without interrupt */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void hns_nic_poll_controller(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < priv->ae_handle->q_num * 2; i++)
+ napi_schedule(&priv->ring_data[i].napi);
+ local_irq_restore(flags);
+}
+#endif
+
+static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ int ret;
+
+	assert(skb->queue_mapping < priv->ae_handle->q_num);
+ ret = hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+ if (ret == NETDEV_TX_OK) {
+ ndev->trans_start = jiffies;
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ }
+ return (netdev_tx_t)ret;
+}
+
+static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ if (!h->dev->ops->set_mtu)
+ return -ENOTSUPP;
+
+ if (netif_running(ndev)) {
+ (void)hns_nic_net_stop(ndev);
+ msleep(100);
+
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ if (ret)
+ netdev_err(ndev, "set mtu fail, return value %d\n",
+ ret);
+
+ if (hns_nic_net_open(ndev))
+ netdev_err(ndev, "hns net open fail\n");
+ } else {
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ }
+
+ if (!ret)
+ ndev->mtu = new_mtu;
+
+ return ret;
+}
+
+/**
+ * hns_set_multicast_list - set the multicast MAC addresses
+ * @ndev: net device
+ *
+ * return void
+ */
+void hns_set_multicast_list(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct netdev_hw_addr *ha = NULL;
+
+ if (!h) {
+ netdev_err(ndev, "hnae handle is null\n");
+ return;
+ }
+
+ if (h->dev->ops->set_mc_addr) {
+ netdev_for_each_mc_addr(ha, ndev)
+ if (h->dev->ops->set_mc_addr(h, ha->addr))
+ netdev_err(ndev, "set multicast fail\n");
+ }
+}
+
+void hns_nic_set_rx_mode(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->set_promisc_mode) {
+ if (ndev->flags & IFF_PROMISC)
+ h->dev->ops->set_promisc_mode(h, 1);
+ else
+ h->dev->ops->set_promisc_mode(h, 0);
+ }
+
+ hns_set_multicast_list(ndev);
+}
+
+struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ int idx = 0;
+ u64 tx_bytes = 0;
+ u64 rx_bytes = 0;
+ u64 tx_pkts = 0;
+ u64 rx_pkts = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
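+	/* per-queue byte/packet counters are maintained in software by the
+	 * ring handlers; all other fields come from the aggregate netdev
+	 * statistics.
+	 */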
+ for (idx = 0; idx < h->q_num; idx++) {
+ tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
+ tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
+ rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
+ rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
+ }
+
+ stats->tx_bytes = tx_bytes;
+ stats->tx_packets = tx_pkts;
+ stats->rx_bytes = rx_bytes;
+ stats->rx_packets = rx_pkts;
+
+ stats->rx_errors = ndev->stats.rx_errors;
+ stats->multicast = ndev->stats.multicast;
+ stats->rx_length_errors = ndev->stats.rx_length_errors;
+ stats->rx_crc_errors = ndev->stats.rx_crc_errors;
+ stats->rx_missed_errors = ndev->stats.rx_missed_errors;
+
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->collisions = ndev->stats.collisions;
+ stats->rx_over_errors = ndev->stats.rx_over_errors;
+ stats->rx_frame_errors = ndev->stats.rx_frame_errors;
+ stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
+ stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
+ stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
+ stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
+ stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
+ stats->tx_window_errors = ndev->stats.tx_window_errors;
+ stats->rx_compressed = ndev->stats.rx_compressed;
+ stats->tx_compressed = ndev->stats.tx_compressed;
+
+ return stats;
+}
+
+static const struct net_device_ops hns_nic_netdev_ops = {
+ .ndo_open = hns_nic_net_open,
+ .ndo_stop = hns_nic_net_stop,
+ .ndo_start_xmit = hns_nic_net_xmit,
+ .ndo_tx_timeout = hns_nic_net_timeout,
+ .ndo_set_mac_address = hns_nic_net_set_mac_address,
+ .ndo_change_mtu = hns_nic_change_mtu,
+ .ndo_do_ioctl = hns_nic_do_ioctl,
+ .ndo_get_stats64 = hns_nic_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = hns_nic_poll_controller,
+#endif
+ .ndo_set_rx_mode = hns_nic_set_rx_mode,
+};
+
+static void hns_nic_update_link_status(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ struct hnae_handle *h = priv->ae_handle;
+ int state = 1;
+
+ if (priv->phy) {
+ if (!genphy_update_link(priv->phy))
+ state = priv->phy->link;
+ else
+ state = 0;
+ }
+ state = state && h->dev->ops->get_status(h);
+
+ if (state != priv->link) {
+ if (state) {
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ netdev_info(netdev, "link up\n");
+ } else {
+ netif_carrier_off(netdev);
+ netdev_info(netdev, "link down\n");
+ }
+ priv->link = state;
+ }
+}
+
+/* dump key registers for debugging */
+static void hns_nic_dump(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ u32 *data, reg_num, i;
+
+ if (ops->get_regs_len && ops->get_regs) {
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ reg_num = (reg_num + 3ul) & ~3ul;
+ data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
+ if (data) {
+ ops->get_regs(priv->ae_handle, data);
+ for (i = 0; i < reg_num; i += 4)
+ pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, data[i], data[i + 1],
+ data[i + 2], data[i + 3]);
+ kfree(data);
+ }
+ }
+
+ for (i = 0; i < h->q_num; i++) {
+ pr_info("tx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->tx_ring.next_to_clean);
+ pr_info("tx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->tx_ring.next_to_use);
+ pr_info("rx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->rx_ring.next_to_clean);
+ pr_info("rx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->rx_ring.next_to_use);
+ }
+}
+
+/* reset subtask */
+static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
+{
+ enum hnae_port_type type = priv->ae_handle->port_type;
+
+ if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
+ return;
+ clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+
+ /* If we're already down, removing or resetting, just bail */
+ if (test_bit(NIC_STATE_DOWN, &priv->state) ||
+ test_bit(NIC_STATE_REMOVING, &priv->state) ||
+ test_bit(NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ hns_nic_dump(priv);
+ netdev_info(priv->netdev, "Reset %s port\n",
+ (type == HNAE_PORT_DEBUG ? "debug" : "business"));
+
+ rtnl_lock();
+ /* put off any impending NetWatchDogTimeout */
+ priv->netdev->trans_start = jiffies;
+
+ if (type == HNAE_PORT_DEBUG)
+ hns_nic_net_reinit(priv->netdev);
+ rtnl_unlock();
+}
+
+/* mark the service task as complete */
+static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
+{
+ assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
+
+ smp_mb__before_atomic();
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+}
+
+static void hns_nic_service_task(struct work_struct *work)
+{
+ struct hns_nic_priv *priv
+ = container_of(work, struct hns_nic_priv, service_task);
+ struct hnae_handle *h = priv->ae_handle;
+
+ hns_nic_update_link_status(priv->netdev);
+ h->dev->ops->update_led_status(h);
+ hns_nic_update_stats(priv->netdev);
+
+ hns_nic_reset_subtask(priv);
+ hns_nic_service_event_complete(priv);
+}
+
+static void hns_nic_task_schedule(struct hns_nic_priv *priv)
+{
+ if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
+ !test_bit(NIC_STATE_REMOVING, &priv->state) &&
+ !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
+ (void)schedule_work(&priv->service_task);
+}
+
+static void hns_nic_service_timer(unsigned long data)
+{
+ struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
+
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ hns_nic_task_schedule(priv);
+}
+
+/**
+ * hns_tx_timeout_reset - initiate reset due to Tx timeout
+ * @priv: driver private struct
+ **/
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
+{
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
+ set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+ netdev_warn(priv->netdev,
+ "initiating reset due to tx timeout(%llu,0x%lx)\n",
+ priv->tx_timeout_count, priv->state);
+ priv->tx_timeout_count++;
+ hns_nic_task_schedule(priv);
+ }
+}
+
+static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ int i;
+
+ if (h->q_num > NIC_MAX_Q_PER_VF) {
+		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
+ return -EINVAL;
+ }
+
+ priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
+ GFP_KERNEL);
+ if (!priv->ring_data)
+ return -ENOMEM;
+
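+	/* the first h->q_num entries drive the TX rings and the next
+	 * h->q_num entries the RX rings; each ring gets its own NAPI
+	 * context.
+	 */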
+ for (i = 0; i < h->q_num; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i;
+ rd->ring = &h->qs[i]->tx_ring;
+ rd->poll_one = hns_nic_tx_poll_one;
+ rd->fini_process = hns_nic_tx_fini_pro;
+
+ netif_napi_add(priv->netdev, &rd->napi,
+ hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ for (i = h->q_num; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i - h->q_num;
+ rd->ring = &h->qs[i - h->q_num]->rx_ring;
+ rd->poll_one = hns_nic_rx_poll_one;
+ rd->ex_process = hns_nic_rx_up_pro;
+ rd->fini_process = hns_nic_rx_fini_pro;
+
+ netif_napi_add(priv->netdev, &rd->napi,
+ hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+
+ return 0;
+}
+
+static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ netif_napi_del(&priv->ring_data[i].napi);
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ }
+
+ priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ kfree(priv->ring_data);
+}
+
+static int hns_nic_try_get_ae(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h;
+ int ret;
+
+ h = hnae_get_handle(&priv->netdev->dev,
+ priv->ae_name, priv->port_id, NULL);
+ if (IS_ERR_OR_NULL(h)) {
+ ret = PTR_ERR(h);
+		dev_dbg(priv->dev, "no hnae handle yet, registering notifier!\n");
+ goto out;
+ }
+ priv->ae_handle = h;
+
+ ret = hns_nic_init_phy(ndev, h);
+ if (ret) {
+ dev_err(priv->dev, "probe phy device fail!\n");
+ goto out_init_phy;
+ }
+
+ ret = hns_nic_init_ring_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_ring_data;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_ndev_fail;
+ }
+ return 0;
+
+out_reg_ndev_fail:
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+out_init_phy:
+out_init_ring_data:
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+out:
+ return ret;
+}
+
+static int hns_nic_notifier_action(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct hns_nic_priv *priv =
+ container_of(nb, struct hns_nic_priv, notifier_block);
+
+ assert(action == HNAE_AE_REGISTER);
+
+ if (!hns_nic_try_get_ae(priv->netdev)) {
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+ }
+ return 0;
+}
+
+static int hns_nic_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct hns_nic_priv *priv;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ if (of_device_is_compatible(node, "hisilicon,hns-nic-v2"))
+ priv->enet_ver = AE_VERSION_2;
+ else
+ priv->enet_ver = AE_VERSION_1;
+
+ ret = of_property_read_string(node, "ae-name", &priv->ae_name);
+ if (ret)
+ goto out_read_string_fail;
+
+ ret = of_property_read_u32(node, "port-id", &priv->port_id);
+ if (ret)
+ goto out_read_string_fail;
+
+ hns_init_mac_addr(ndev);
+
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hns_nic_netdev_ops;
+ hns_ethtool_set_ops(ndev);
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO;
+ ndev->vlan_features |=
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dev_dbg(dev, "set mask to 64bit\n");
+ else
+ dev_err(dev, "set mask to 32bit fail!\n");
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(ndev);
+
+ setup_timer(&priv->service_timer, hns_nic_service_timer,
+ (unsigned long)priv);
+ INIT_WORK(&priv->service_task, hns_nic_service_task);
+
+ set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ if (hns_nic_try_get_ae(priv->netdev)) {
+ priv->notifier_block.notifier_call = hns_nic_notifier_action;
+ ret = hnae_register_notifier(&priv->notifier_block);
+ if (ret) {
+ dev_err(dev, "register notifier fail!\n");
+ goto out_notify_fail;
+ }
+		dev_dbg(dev, "no hnae handle yet, registered notifier!\n");
+ }
+
+ return 0;
+
+out_notify_fail:
+ (void)cancel_work_sync(&priv->service_task);
+out_read_string_fail:
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hns_nic_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (ndev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(ndev);
+
+ if (priv->ring_data)
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+
+ if (priv->phy)
+ phy_disconnect(priv->phy);
+ priv->phy = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->ae_handle))
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+ if (priv->notifier_block.notifier_call)
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+
+ set_bit(NIC_STATE_REMOVING, &priv->state);
+ (void)cancel_work_sync(&priv->service_task);
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static const struct of_device_id hns_enet_of_match[] = {
+ {.compatible = "hisilicon,hns-nic-v1",},
+ {.compatible = "hisilicon,hns-nic-v2",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hns_enet_of_match);
+
+static struct platform_driver hns_nic_dev_driver = {
+ .driver = {
+ .name = "hns-nic",
+ .of_match_table = hns_enet_of_match,
+ },
+ .probe = hns_nic_dev_probe,
+ .remove = hns_nic_dev_remove,
+};
+
+module_platform_driver(hns_nic_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
new file mode 100644
index 000000000000..dae0ed19ac6d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_ENET_H
+#define __HNS_ENET_H
+
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include "hnae.h"
+
+enum hns_nic_state {
+ NIC_STATE_TESTING = 0,
+ NIC_STATE_RESETTING,
+ NIC_STATE_REINITING,
+ NIC_STATE_DOWN,
+ NIC_STATE_DISABLED,
+ NIC_STATE_REMOVING,
+ NIC_STATE_SERVICE_INITED,
+ NIC_STATE_SERVICE_SCHED,
+ NIC_STATE2_RESET_REQUESTED,
+ NIC_STATE_MAX
+};
+
+struct hns_nic_ring_data {
+ struct hnae_ring *ring;
+ struct napi_struct napi;
+ int queue_index;
+ int (*poll_one)(struct hns_nic_ring_data *, int, void *);
+ void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
+ void (*fini_process)(struct hns_nic_ring_data *);
+};
+
+struct hns_nic_priv {
+ const char *ae_name;
+ u32 enet_ver;
+ u32 port_id;
+ int phy_mode;
+ int phy_led_val;
+ struct phy_device *phy;
+ struct net_device *netdev;
+ struct device *dev;
+ struct hnae_handle *ae_handle;
+
+	/* the cb for nic to manage the ring buffer; the first half of the
+	 * array is for the TX rings and the second half for the RX rings
+	 */
+ struct hns_nic_ring_data *ring_data;
+
+ /* The most recently read link state */
+ int link;
+ u64 tx_timeout_count;
+
+ unsigned long state;
+
+ struct timer_list service_timer;
+
+ struct work_struct service_task;
+
+ struct notifier_block notifier_block;
+};
+
+#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
+#define rx_ring_data(priv, idx) \
+ ((priv)->ring_data[(priv)->ae_handle->q_num + (idx)])
+
+void hns_ethtool_set_ops(struct net_device *ndev);
+void hns_nic_net_reset(struct net_device *ndev);
+void hns_nic_net_reinit(struct net_device *netdev);
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);
+
+#endif /* __HNS_ENET_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
new file mode 100644
index 000000000000..a0332129970b
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -0,0 +1,1214 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hns_enet.h"
+
+#define HNS_PHY_PAGE_MDIX 0
+#define HNS_PHY_PAGE_LED 3
+#define HNS_PHY_PAGE_COPPER 0
+
+#define HNS_PHY_PAGE_REG 22 /* Page Selection Reg. */
+#define HNS_PHY_CSC_REG 16 /* Copper Specific Control Register */
+#define HNS_PHY_CSS_REG 17 /* Copper Specific Status Register */
+#define HNS_LED_FC_REG 16 /* LED Function Control Reg. */
+#define HNS_LED_PC_REG 17 /* LED Polarity Control Reg. */
+
+#define HNS_LED_FORCE_ON 9
+#define HNS_LED_FORCE_OFF 8
+
+#define HNS_CHIP_VERSION 660
+#define HNS_NET_STATS_CNT 26
+
+#define PHY_MDIX_CTRL_S (5)
+#define PHY_MDIX_CTRL_M (3 << PHY_MDIX_CTRL_S)
+
+#define PHY_MDIX_STATUS_B (6)
+#define PHY_SPEED_DUP_RESOLVE_B (11)
+
+/**
+ * hns_nic_get_link - get the current link status
+ * @net_dev: net_device
+ * return 1 - link up, 0 - link down
+ */
+static u32 hns_nic_get_link(struct net_device *net_dev)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ u32 link_stat = priv->link;
+ struct hnae_handle *h;
+
+ assert(priv && priv->ae_handle);
+ h = priv->ae_handle;
+
+ if (priv->phy) {
+ if (!genphy_update_link(priv->phy))
+ link_stat = priv->phy->link;
+ else
+ link_stat = 0;
+ }
+
+ if (h->dev && h->dev->ops && h->dev->ops->get_status)
+ link_stat = link_stat && h->dev->ops->get_status(h);
+ else
+ link_stat = 0;
+
+ return link_stat;
+}
+
+static void hns_get_mdix_mode(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ int mdix_ctrl, mdix, retval, is_resolved;
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct phy_device *phy_dev = priv->phy;
+
+ if (!phy_dev || !phy_dev->bus) {
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ return;
+ }
+
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_MDIX);
+
+ retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG);
+ mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S);
+
+ retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG);
+ mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B);
+ is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B);
+
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+
+ switch (mdix_ctrl) {
+ case 0x0:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ break;
+ case 0x1:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case 0x3:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ default:
+ cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ break;
+ }
+
+ if (!is_resolved)
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ else if (mdix)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+}
+
+/**
+ * hns_nic_get_settings - implement ethtool get settings
+ * @net_dev: net_device
+ * @cmd: ethtool_cmd
+ * return 0 - success, negative - fail
+ */
+static int hns_nic_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 link_stat;
+ int ret;
+ u8 duplex;
+ u16 speed;
+
+ if (!priv || !priv->ae_handle)
+ return -ESRCH;
+
+ h = priv->ae_handle;
+ if (!h->dev || !h->dev->ops || !h->dev->ops->get_info)
+ return -ESRCH;
+
+ ret = h->dev->ops->get_info(h, NULL, &speed, &duplex);
+ if (ret < 0) {
+ netdev_err(net_dev, "%s get_info error!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* When there is no phy, autoneg is off. */
+ cmd->autoneg = false;
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+
+ if (priv->phy)
+ (void)phy_ethtool_gset(priv->phy, cmd);
+
+ link_stat = hns_nic_get_link(net_dev);
+ if (!link_stat) {
+ ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ if (cmd->autoneg)
+ cmd->advertising |= ADVERTISED_Autoneg;
+
+ cmd->supported |= h->if_support;
+ if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ cmd->supported |= SUPPORTED_TP;
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ cmd->supported |= SUPPORTED_FIBRE;
+ cmd->advertising |= ADVERTISED_10000baseKR_Full;
+ }
+
+ if (h->port_type == HNAE_PORT_SERVICE) {
+ cmd->port = PORT_FIBRE;
+ cmd->supported |= SUPPORTED_Pause;
+ } else {
+ cmd->port = PORT_TP;
+ }
+
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+ hns_get_mdix_mode(net_dev, cmd);
+
+ return 0;
+}
+
+/**
+ * hns_nic_set_settings - implement ethtool set settings
+ * @net_dev: net_device
+ * @cmd: ethtool_cmd
+ * return 0 - success, negative - fail
+ */
+static int hns_nic_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 speed;
+
+ if (!netif_running(net_dev))
+ return -ESRCH;
+
+ if (!priv || !priv->ae_handle || !priv->ae_handle->dev ||
+ !priv->ae_handle->dev->ops)
+ return -ENODEV;
+
+ h = priv->ae_handle;
+ speed = ethtool_cmd_speed(cmd);
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
+ cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (priv->phy)
+ return phy_ethtool_sset(priv->phy, cmd);
+
+ if ((speed != SPEED_10 && speed != SPEED_100 &&
+ speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
+ cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+ } else {
+		netdev_err(net_dev, "Not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ if (h->dev->ops->adjust_link) {
+ h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
+ return 0;
+ }
+
+	netdev_err(net_dev, "Not supported!\n");
+ return -ENOTSUPP;
+}
+
+static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
+ "Mac Loopback test",
+ "Serdes Loopback test",
+ "Phy Loopback test"
+};
+
+static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
+{
+#define COPPER_CONTROL_REG 0
+#define PHY_LOOP_BACK BIT(14)
+ u16 val = 0;
+
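+	/* Note: the writes below assume a paged PHY (page select via
+	 * register 22, Marvell-style); the magic values are taken verbatim
+	 * from the vendor's loopback bring-up sequence.
+	 */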
+	if (phy_dev->is_c45) /* Clause 45 (XGE) PHYs are not supported yet */
+ return -ENOTSUPP;
+
+ if (en) {
+ /* speed : 1000M */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG, 2);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 21, 0x1046);
+ /* Force Master */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 9, 0x1F00);
+ /* Soft-reset */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 0, 0x9140);
+		/* if autoneg is disabled, a second soft-reset is required */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 0, 0x9140);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0xFA);
+
+ /* Default is 0x0400 */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 1, 0x418);
+
+ /* Force 1000M Link, Default is 0x0200 */
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 7, 0x20C);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0);
+
+ /* Enable MAC loop-back */
+ val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG);
+ val |= PHY_LOOP_BACK;
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG, val);
+ } else {
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0xFA);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 1, 0x400);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 7, 0x200);
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ 22, 0);
+
+ val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG);
+ val &= ~PHY_LOOP_BACK;
+ (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+ COPPER_CONTROL_REG, val);
+ }
+ return 0;
+}
+
+static int __lb_setup(struct net_device *ndev,
+ enum hnae_loop loop)
+{
+ int ret = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy_dev = priv->phy;
+ struct hnae_handle *h = priv->ae_handle;
+
+ switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ if ((phy_dev) && (!phy_dev->is_c45))
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ if ((h->dev->ops->set_loopback) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII))
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_INTERNALLOOP_SERDES:
+ if (h->dev->ops->set_loopback)
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_LOOP_NONE:
+ if ((phy_dev) && (!phy_dev->is_c45))
+ ret |= hns_nic_config_phy_loopback(phy_dev, 0x0);
+
+ if (h->dev->ops->set_loopback) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ ret |= h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_MAC, 0x0);
+
+ ret |= h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_SERDES, 0x0);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int __lb_up(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int speed, duplex;
+ int ret;
+
+ hns_nic_net_reset(ndev);
+
+ if (priv->phy) {
+ phy_disconnect(priv->phy);
+ msleep(100);
+
+ ret = hns_nic_init_phy(ndev, h);
+ if (ret)
+ return ret;
+ }
+
+ ret = __lb_setup(ndev, loop_mode);
+ if (ret)
+ return ret;
+
+ msleep(100);
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+	/* force link speed and full duplex for the loopback test */
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ h->dev->ops->adjust_link(h, speed, duplex);
+
+ return 0;
+}
+
+static void __lb_other_process(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev;
+ struct hnae_ring *ring;
+ struct netdev_queue *dev_queue;
+ struct sk_buff *new_skb;
+ unsigned int frame_size;
+ int check_ok;
+ u32 i;
+ char buff[33]; /* 32B data and the last character '\0' */
+
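+	/* the test frame is filled with a fixed 0xFF/0xAA/0xBE/0xAF
+	 * pattern; the RX side later checks bytes at fixed offsets to
+	 * detect corruption on the loopback path.
+	 */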
+	if (!ring_data) { /* called without ring_data just to build the test frame */
+ frame_size = skb->len;
+ memset(skb->data, 0xFF, frame_size);
+ frame_size &= ~1ul;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE,
+ frame_size / 2 - 11);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF,
+ frame_size / 2 - 13);
+ return;
+ }
+
+ ring = ring_data->ring;
+ ndev = ring_data->napi.dev;
+	if (is_tx_ring(ring)) { /* TX ring: just reset the queue's BQL state */
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ return;
+ }
+
+ frame_size = skb->len;
+ frame_size &= ~1ul;
+	/* copy to linearize frames that span multiple buffers */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+
+ check_ok = 0;
+	if (*(skb->data + 10) == 0xFF) { /* verify the received test frame */
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ check_ok = 1;
+ }
+
+ if (check_ok) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_frame_errors++;
+ for (i = 0; i < skb->len; i++) {
+			snprintf(buff + i % 16 * 2, 3, /* trailing \0 */
+ "%02x", *(skb->data + i));
+ if ((i % 16 == 15) || (i == skb->len - 1))
+ pr_info("%s\n", buff);
+ }
+ }
+ dev_kfree_skb_any(skb);
+}
+
+static int __lb_clean_rings(struct hns_nic_priv *priv,
+ int ringid0, int ringid1, int budget)
+{
+ int i, ret;
+ struct hns_nic_ring_data *ring_data;
+ struct net_device *ndev = priv->netdev;
+ unsigned long rx_packets = ndev->stats.rx_packets;
+ unsigned long rx_bytes = ndev->stats.rx_bytes;
+ unsigned long rx_frame_errors = ndev->stats.rx_frame_errors;
+
+ for (i = ringid0; i <= ringid1; i++) {
+ ring_data = &priv->ring_data[i];
+ (void)ring_data->poll_one(ring_data,
+ budget, __lb_other_process);
+ }
+ ret = (int)(ndev->stats.rx_packets - rx_packets);
+ ndev->stats.rx_packets = rx_packets;
+ ndev->stats.rx_bytes = rx_bytes;
+ ndev->stats.rx_frame_errors = rx_frame_errors;
+ return ret;
+}
+
+/**
+ * __lb_run_test - run a loopback test
+ * @ndev: net device
+ * @loop_mode: loopback mode
+ */
+static int __lb_run_test(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_PKT_NUM_PER_CYCLE 1
+#define NIC_LB_TEST_RING_ID 0
+#define NIC_LB_TEST_FRAME_SIZE 128
+/* nic loopback test err */
+#define NIC_LB_TEST_NO_MEM_ERR 1
+#define NIC_LB_TEST_TX_CNT_ERR 2
+#define NIC_LB_TEST_RX_CNT_ERR 3
+#define NIC_LB_TEST_RX_PKG_ERR 4
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ size = NIC_LB_TEST_FRAME_SIZE;
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NIC_LB_TEST_NO_MEM_ERR;
+
+ /* place data into test skb */
+ (void)skb_put(skb, size);
+ __lb_other_process(NULL, skb);
+ skb->queue_mapping = NIC_LB_TEST_RING_ID;
+
+ lc = 1;
+ for (j = 0; j < lc; j++) {
+ /* reset count of good packets */
+ good_cnt = 0;
+		/* place the test packets on the transmit queue */
+ for (i = 0; i < NIC_LB_TEST_PKT_NUM_PER_CYCLE; i++) {
+ (void)skb_get(skb);
+
+ tx_ret_val = (netdev_tx_t)hns_nic_net_xmit_hw(
+ ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+ if (tx_ret_val == NETDEV_TX_OK)
+ good_cnt++;
+ else
+ break;
+ }
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_TX_CNT_ERR;
+ dev_err(priv->dev, "%s sent fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+
+ /* allow 100 milliseconds for packets to go from Tx to Rx */
+ msleep(100);
+
+ good_cnt = __lb_clean_rings(priv,
+ h->q_num, h->q_num * 2 - 1,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_RX_CNT_ERR;
+ dev_err(priv->dev, "%s recv fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+ (void)__lb_clean_rings(priv,
+ NIC_LB_TEST_RING_ID, NIC_LB_TEST_RING_ID,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ }
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ return ret_val;
+}
+
+static int __lb_down(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ ret = __lb_setup(ndev, MAC_LOOP_NONE);
+ if (ret)
+ netdev_err(ndev, "%s: __lb_setup return error(%d)!\n",
+ __func__,
+ ret);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ if (h->dev->ops->stop)
+ h->dev->ops->stop(h);
+
+ usleep_range(10000, 20000);
+ (void)__lb_clean_rings(priv, 0, h->q_num - 1, 256);
+
+ hns_nic_net_reset(ndev);
+
+ return 0;
+}
+
+/**
+ * hns_nic_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns_nic_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ bool if_running = netif_running(ndev);
+#define SELF_TEST_TYPE_NUM 3
+	int st_param[SELF_TEST_TYPE_NUM][2];
+ int i;
+ int test_index = 0;
+
+	st_param[0][0] = MAC_INTERNALLOOP_MAC; /* XGE does not support MAC loopback */
+ st_param[0][1] = (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII);
+ st_param[1][0] = MAC_INTERNALLOOP_SERDES;
+	st_param[1][1] = 1; /* serdes is always present */
+	st_param[2][0] = MAC_INTERNALLOOP_PHY; /* needs a PHY node */
+ st_param[2][1] = ((!!(priv->ae_handle->phy_node)) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ set_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ (void)dev_close(ndev);
+
+		for (i = 0; i < SELF_TEST_TYPE_NUM; i++) {
+ if (!st_param[i][1])
+ continue; /* NEXT testing */
+
+ data[test_index] = __lb_up(ndev,
+ (enum hnae_loop)st_param[i][0]);
+ if (!data[test_index]) {
+ data[test_index] = __lb_run_test(
+ ndev, (enum hnae_loop)st_param[i][0]);
+ (void)__lb_down(ndev);
+ }
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+
+ hns_nic_net_reset(priv->netdev);
+
+ clear_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ (void)dev_open(ndev);
+ }
+ /* Online tests aren't run; pass by default */
+
+ (void)msleep_interruptible(4 * 1000);
+}
+
+/**
+ * hns_nic_get_drvinfo - get net driver info
+ * @net_dev: net device
+ * @drvinfo: driver info
+ */
+static void hns_nic_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ assert(priv);
+
+ strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+
+ strncpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
+ drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+ strncpy(drvinfo->bus_info, priv->dev->bus->name,
+ sizeof(drvinfo->bus_info));
+ drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
+
+ strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
+}
+
+/**
+ * hns_get_ringparam - get ring parameter
+ * @net_dev: net device
+ * @param: ethtool parameter
+ */
+void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ struct hnae_queue *queue;
+ u32 uplimit = 0;
+
+ queue = priv->ae_handle->qs[0];
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_ring_bdnum_limit)
+ ops->get_ring_bdnum_limit(queue, &uplimit);
+
+ param->rx_max_pending = uplimit;
+ param->tx_max_pending = uplimit;
+ param->rx_pending = queue->rx_ring.desc_num;
+ param->tx_pending = queue->tx_ring.desc_num;
+}
+
+/**
+ * hns_get_pauseparam - get pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ */
+static void hns_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_pauseparam)
+ ops->get_pauseparam(priv->ae_handle, &param->autoneg,
+ &param->rx_pause, &param->tx_pause);
+}
+
+/**
+ * hns_set_pauseparam - set pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ struct hnae_ae_ops *ops;
+
+	assert(priv && priv->ae_handle);
+
+ h = priv->ae_handle;
+ ops = h->dev->ops;
+
+ if (!ops->set_pauseparam)
+ return -ESRCH;
+
+ return ops->set_pauseparam(priv->ae_handle, param->autoneg,
+ param->rx_pause, param->tx_pause);
+}
+
+/**
+ * hns_get_coalesce - get coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ ec->use_adaptive_rx_coalesce = 1;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ if ((!ops->get_coalesce_usecs) ||
+ (!ops->get_rx_max_coalesced_frames))
+ return -ESRCH;
+
+ ops->get_coalesce_usecs(priv->ae_handle,
+ &ec->tx_coalesce_usecs,
+ &ec->rx_coalesce_usecs);
+
+ ops->get_rx_max_coalesced_frames(
+ priv->ae_handle,
+ &ec->tx_max_coalesced_frames,
+ &ec->rx_max_coalesced_frames);
+
+ return 0;
+}
+
+/**
+ * hns_set_coalesce - set coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ int ret;
+
+ assert(priv && priv->ae_handle);
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
+ return -EINVAL;
+
+ if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
+ return -EINVAL;
+
+ if ((!ops->set_coalesce_usecs) ||
+ (!ops->set_coalesce_frames))
+ return -ESRCH;
+
+ ops->set_coalesce_usecs(priv->ae_handle,
+ ec->rx_coalesce_usecs);
+
+ ret = ops->set_coalesce_frames(
+ priv->ae_handle,
+ ec->rx_max_coalesced_frames);
+
+ return ret;
+}
+
+/**
+ * hns_get_channels - get channel info.
+ * @net_dev: net device
+ * @ch: channel info.
+ */
+void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ ch->max_rx = priv->ae_handle->q_num;
+ ch->max_tx = priv->ae_handle->q_num;
+
+ ch->rx_count = priv->ae_handle->q_num;
+ ch->tx_count = priv->ae_handle->q_num;
+}
+
+/**
+ * hns_get_ethtool_stats - get detailed statistics.
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 *p = data;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ const struct rtnl_link_stats64 *net_stats;
+ struct rtnl_link_stats64 temp;
+
+ if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) {
+ netdev_err(netdev, "get_stats or update_stats is null!\n");
+ return;
+ }
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+
+ net_stats = dev_get_stats(netdev, &temp);
+
+ /* get netdev statistics */
+ p[0] = net_stats->rx_packets;
+ p[1] = net_stats->tx_packets;
+ p[2] = net_stats->rx_bytes;
+ p[3] = net_stats->tx_bytes;
+ p[4] = net_stats->rx_errors;
+ p[5] = net_stats->tx_errors;
+ p[6] = net_stats->rx_dropped;
+ p[7] = net_stats->tx_dropped;
+ p[8] = net_stats->multicast;
+ p[9] = net_stats->collisions;
+ p[10] = net_stats->rx_over_errors;
+ p[11] = net_stats->rx_crc_errors;
+ p[12] = net_stats->rx_frame_errors;
+ p[13] = net_stats->rx_fifo_errors;
+ p[14] = net_stats->rx_missed_errors;
+ p[15] = net_stats->tx_aborted_errors;
+ p[16] = net_stats->tx_carrier_errors;
+ p[17] = net_stats->tx_fifo_errors;
+ p[18] = net_stats->tx_heartbeat_errors;
+ p[19] = net_stats->rx_length_errors;
+ p[20] = net_stats->tx_window_errors;
+ p[21] = net_stats->rx_compressed;
+ p[22] = net_stats->tx_compressed;
+
+ p[23] = netdev->rx_dropped.counter;
+ p[24] = netdev->tx_dropped.counter;
+
+ p[25] = priv->tx_timeout_count;
+
+ /* get driver statistics */
+ h->dev->ops->get_stats(h, &p[26]);
+}
+
+/**
+ * hns_get_strings - return a set of strings that describe the requested objects
+ * @netdev: net device
+ * @stringset: string set ID.
+ * @data: objects data.
+ */
+void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ char *buff = (char *)data;
+
+ if (!h->dev->ops->get_strings) {
+ netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
+ return;
+ }
+
+ if (stringset == ETH_SS_TEST) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC],
+ ETH_GSTRING_LEN);
+ buff += ETH_GSTRING_LEN;
+ }
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
+ ETH_GSTRING_LEN);
+ buff += ETH_GSTRING_LEN;
+ if ((priv->phy) && (!priv->phy->is_c45))
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
+ ETH_GSTRING_LEN);
+
+ } else {
+ snprintf(buff, ETH_GSTRING_LEN, "rx_packets");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_packets");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_bytes");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_bytes");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "multicast");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "collisions");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_window_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_compressed");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_compressed");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout");
+ buff = buff + ETH_GSTRING_LEN;
+
+ h->dev->ops->get_strings(h, stringset, (u8 *)buff);
+ }
+}
+
+/**
+ * hns_get_sset_count - get the string set count returned by hns_get_strings.
+ * @netdev: net device
+ * @stringset: string set index, 0: self test string; 1: statistics string.
+ *
+ * Return string set count.
+ */
+int hns_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ if (!ops->get_sset_count) {
+ netdev_err(netdev, "get_sset_count is null!\n");
+ return -EOPNOTSUPP;
+ }
+ if (stringset == ETH_SS_TEST) {
+ u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+
+ if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
+ cnt--;
+
+ if ((!priv->phy) || (priv->phy->is_c45))
+ cnt--;
+
+ return cnt;
+ }
+
+ return HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset);
+}
+
+/**
+ * hns_phy_led_set - set phy LED status.
+ * @netdev: net device
+ * @value: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_phy_led_set(struct net_device *netdev, int value)
+{
+ int retval;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy_dev = priv->phy;
+
+ if (!phy_dev->bus) {
+ netdev_err(netdev, "phy_dev->bus is null!\n");
+ return -EINVAL;
+ }
+ retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
+ if (!retval)
+ retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_LED_FC_REG, value);
+ if (!retval)
+ retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+ if (retval) {
+ netdev_err(netdev, "mdiobus_write failed!\n");
+ return retval;
+ }
+ return 0;
+}
+
+/**
+ * hns_set_phys_id - set phy identify LED.
+ * @netdev: net device
+ * @state: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct phy_device *phy_dev = priv->phy;
+ int ret;
+
+ if (phy_dev)
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus,
+ phy_dev->addr,
+ HNS_LED_FC_REG);
+
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
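+ /* a positive return asks the ethtool core to drive the
+ * blinking itself, calling ID_ON/ID_OFF at this frequency
+ * (cycles per second)
+ */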
+ return 2;
+ case ETHTOOL_ID_ON:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_ON);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_OFF:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_OFF);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_LED_FC_REG, priv->phy_led_val);
+ if (ret)
+ return ret;
+
+ ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+ HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ACTIVE);
+ case ETHTOOL_ID_ON:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ON);
+ case ETHTOOL_ID_OFF:
+ return h->dev->ops->set_led_id(h, HNAE_LED_OFF);
+ case ETHTOOL_ID_INACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_INACTIVE);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_get_regs - get net device registers
+ * @net_dev: net device
+ * @cmd: ethtool cmd
+ * @data: register data
+ */
+void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ assert(priv && priv->ae_handle);
+
+ ops = priv->ae_handle->dev->ops;
+
+ cmd->version = HNS_CHIP_VERSION;
+ if (!ops->get_regs) {
+ netdev_err(net_dev, "ops->get_regs is null!\n");
+ return;
+ }
+ ops->get_regs(priv->ae_handle, data);
+}
+
+/**
+ * hns_get_regs_len - get the total register length.
+ * @net_dev: net device
+ *
+ * Return total register len.
+ */
+static int hns_get_regs_len(struct net_device *net_dev)
+{
+ int reg_num;
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ assert(priv && priv->ae_handle);
+
+ ops = priv->ae_handle->dev->ops;
+ if (!ops->get_regs_len) {
+ netdev_err(net_dev, "ops->get_regs_len is null!\n");
+ return -EOPNOTSUPP;
+ }
+
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ if (reg_num > 0)
+ return reg_num * sizeof(u32);
+ else
+ return reg_num; /* error code */
+}
+
+/**
+ * hns_nic_nway_reset - nway reset
+ * @netdev: net device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_nway_reset(struct net_device *netdev)
+{
+ int ret = 0;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy = priv->phy;
+
+ if (netif_running(netdev)) {
+ if (phy)
+ ret = genphy_restart_aneg(phy);
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops hns_ethtool_ops = {
+ .get_drvinfo = hns_nic_get_drvinfo,
+ .get_link = hns_nic_get_link,
+ .get_settings = hns_nic_get_settings,
+ .set_settings = hns_nic_set_settings,
+ .get_ringparam = hns_get_ringparam,
+ .get_pauseparam = hns_get_pauseparam,
+ .set_pauseparam = hns_set_pauseparam,
+ .get_coalesce = hns_get_coalesce,
+ .set_coalesce = hns_set_coalesce,
+ .get_channels = hns_get_channels,
+ .self_test = hns_nic_self_test,
+ .get_strings = hns_get_strings,
+ .get_sset_count = hns_get_sset_count,
+ .get_ethtool_stats = hns_get_ethtool_stats,
+ .set_phys_id = hns_set_phys_id,
+ .get_regs_len = hns_get_regs_len,
+ .get_regs = hns_get_regs,
+ .nway_reset = hns_nic_nway_reset,
+};
+
+void hns_ethtool_set_ops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &hns_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
new file mode 100644
index 000000000000..37491c85bc42
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spinlock_types.h>
+
+#define MDIO_DRV_NAME "Hi-HNS_MDIO"
+#define MDIO_BUS_NAME "Hisilicon MII Bus"
+#define MDIO_DRV_VERSION "1.3.0"
+#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define MDIO_DRV_STRING MDIO_BUS_NAME
+#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
+
+#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
+
+#define MDIO_TIMEOUT 1000000
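+/* MDIO_TIMEOUT is a raw poll-loop iteration count (used as such by
+ * hns_mdio_wait_ready() and mdio_sc_cfg_reg_write()), not a time unit
+ */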
+
+struct hns_mdio_device {
+ void *vbase; /* mdio reg base address */
+ struct regmap *subctrl_vbase;
+};
+
+/* mdio reg */
+#define MDIO_COMMAND_REG 0x0
+#define MDIO_ADDR_REG 0x4
+#define MDIO_WDATA_REG 0x8
+#define MDIO_RDATA_REG 0xc
+#define MDIO_STA_REG 0x10
+
+/* cfg phy bit map */
+#define MDIO_CMD_DEVAD_M 0x1f
+#define MDIO_CMD_DEVAD_S 0
+#define MDIO_CMD_PRTAD_M 0x1f
+#define MDIO_CMD_PRTAD_S 5
+#define MDIO_CMD_OP_M 0x3
+#define MDIO_CMD_OP_S 10
+#define MDIO_CMD_ST_M 0x3
+#define MDIO_CMD_ST_S 12
+#define MDIO_CMD_START_B 14
+
+#define MDIO_ADDR_DATA_M 0xffff
+#define MDIO_ADDR_DATA_S 0
+
+#define MDIO_WDATA_DATA_M 0xffff
+#define MDIO_WDATA_DATA_S 0
+
+#define MDIO_RDATA_DATA_M 0xffff
+#define MDIO_RDATA_DATA_S 0
+
+#define MDIO_STATE_STA_B 0
+
+enum mdio_st_clause {
+ MDIO_ST_CLAUSE_45 = 0,
+ MDIO_ST_CLAUSE_22
+};
+
+enum mdio_c22_op_seq {
+ MDIO_C22_WRITE = 1,
+ MDIO_C22_READ = 2
+};
+
+enum mdio_c45_op_seq {
+ MDIO_C45_WRITE_ADDR = 0,
+ MDIO_C45_WRITE_DATA,
+ MDIO_C45_READ_INCREMENT,
+ MDIO_C45_READ
+};
+
+/* peri subctrl reg */
+#define MDIO_SC_CLK_EN 0x338
+#define MDIO_SC_CLK_DIS 0x33C
+#define MDIO_SC_RESET_REQ 0xA38
+#define MDIO_SC_RESET_DREQ 0xA3C
+#define MDIO_SC_CTRL 0x2010
+#define MDIO_SC_CLK_ST 0x531C
+#define MDIO_SC_RESET_ST 0x5A1C
+
+static void mdio_write_reg(void *base, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+ writel_relaxed(value, reg_addr + reg);
+}
+
+#define MDIO_WRITE_REG(a, reg, value) \
+ mdio_write_reg((a)->vbase, (reg), (value))
+
+static u32 mdio_read_reg(void *base, u32 reg)
+{
+ u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+ return readl_relaxed(reg_addr + reg);
+}
+
+#define mdio_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~((mask) << (shift))); \
+ (origin) |= (((val) & (mask)) << (shift)); \
+ } while (0)
+
+#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
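+/* Example: mdio_set_field(origin, 0x1f, 5, 7) clears bits 9:5 of origin
+ * and ORs in (7 & 0x1f) << 5; mdio_get_field(origin, 0x1f, 5) reads the
+ * same bits back.
+ */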
+
+static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+ u32 val)
+{
+ u32 origin = mdio_read_reg(base, reg);
+
+ mdio_set_field(origin, mask, shift, val);
+ mdio_write_reg(base, reg, origin);
+}
+
+#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
+ mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
+
+static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+ u32 origin;
+
+ origin = mdio_read_reg(base, reg);
+ return mdio_get_field(origin, mask, shift);
+}
+
+#define MDIO_GET_REG_FIELD(dev, reg, mask, shift) \
+ mdio_get_reg_field((dev)->vbase, (reg), (mask), (shift))
+
+#define MDIO_GET_REG_BIT(dev, reg, bit) \
+ mdio_get_reg_field((dev)->vbase, (reg), 0x1ull, (bit))
+
+#define MDIO_CHECK_SET_ST 1
+#define MDIO_CHECK_CLR_ST 0
+
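+/* Write set_val to cfg_reg in the peri subctrl regmap, then poll st_reg
+ * until the bits in st_msk are set (MDIO_CHECK_SET_ST) or cleared
+ * (MDIO_CHECK_CLR_ST); gives up with -EBUSY after MDIO_TIMEOUT polls.
+ */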
+static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
+ u32 cfg_reg, u32 set_val,
+ u32 st_reg, u32 st_msk, u8 check_st)
+{
+ u32 time_cnt;
+ u32 reg_value;
+
+ regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
+
+ for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
+ regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ reg_value &= st_msk;
+ if ((!!check_st) == (!!reg_value))
+ break;
+ }
+
+ if ((!!check_st) != (!!reg_value))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hns_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ int i;
+ u32 cmd_reg_value = 1;
+
+ /* wait for MDIO_COMMAND_REG's mdio_start bit to clear;
+ * only then may a read or write be issued
+ */
+ for (i = 0; cmd_reg_value; i++) {
+ cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
+ MDIO_COMMAND_REG,
+ MDIO_CMD_START_B);
+ if (i == MDIO_TIMEOUT)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
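+/* Assemble one command word and kick off the transaction: bit 14 is the
+ * start bit, bits 13:12 the ST field (clause 45/22), bits 11:10 the OP
+ * code, bits 9:5 the port (phy) address and bits 4:0 the device address
+ * or register number.
+ */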
+static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
+ u8 is_c45, u8 op, u8 phy_id, u16 cmd)
+{
+ u32 cmd_reg_value;
+ u8 st = is_c45 ? MDIO_ST_CLAUSE_45 : MDIO_ST_CLAUSE_22;
+
+ cmd_reg_value = st << MDIO_CMD_ST_S;
+ cmd_reg_value |= op << MDIO_CMD_OP_S;
+ cmd_reg_value |=
+ (phy_id & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S;
+ cmd_reg_value |= (cmd & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S;
+ cmd_reg_value |= 1 << MDIO_CMD_START_B;
+
+ MDIO_WRITE_REG(mdio_dev, MDIO_COMMAND_REG, cmd_reg_value);
+}
+
+/**
+ * hns_mdio_write - write a phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ * @data: register value to write
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write(struct mii_bus *bus,
+ int phy_id, int regnum, u16 data)
+{
+ int ret;
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u8 devad = ((regnum >> 16) & 0x1f);
+ u8 is_c45 = !!(regnum & MII_ADDR_C45);
+ u16 reg = (u16)(regnum & 0xffff);
+ u8 op;
+ u16 cmd_reg_cfg;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
+ phy_id, is_c45, devad, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ if (!is_c45) {
+ cmd_reg_cfg = reg;
+ op = MDIO_C22_WRITE;
+ } else {
+ /* config the cmd-reg to write addr*/
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* config the data needed writing */
+ cmd_reg_cfg = devad;
+ op = MDIO_C45_WRITE_DATA;
+ }
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
+
+ hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_read - read a phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ int ret;
+ u16 reg_val = 0;
+ u8 devad = ((regnum >> 16) & 0x1f);
+ u8 is_c45 = !!(regnum & MII_ADDR_C45);
+ u16 reg = (u16)(regnum & 0xffff);
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
+ phy_id, is_c45, devad, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ if (!is_c45) {
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C22_READ, phy_id, reg);
+ } else {
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ /* Step 2: config the cmd-reg to write addr */
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* Step 3: check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* Step 4: read the register data by c45 mode */
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_READ, phy_id, devad);
+ }
+
+ /* Step 5: wait for MDIO_COMMAND_REG's mdio_start bit to clear, */
+ /* i.e. check that the read or write has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
+
+ /* Step 6; get out data*/
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
+
+/**
+ * hns_mdio_reset - reset mdio bus
+ * @bus: mdio bus
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_reset(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ int ret;
+
+ if (!mdio_dev->subctrl_vbase) {
+ dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+ return -ENODEV;
+ }
+
+ /*1. reset req, and read reset st check*/
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
+ MDIO_SC_RESET_ST, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset fail\n");
+ return ret;
+ }
+
+ /*2. dis clk, and read clk st check*/
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
+ 0x1, MDIO_SC_CLK_ST, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+
+ /*3. reset dreq, and read reset st check*/
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
+ MDIO_SC_RESET_ST, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+
+ /*4. en clk, and read clk st check*/
+ ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
+ 0x1, MDIO_SC_CLK_ST, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret)
+ dev_err(&bus->dev, "MDIO en clk fail\n");
+
+ return ret;
+}
+
+/**
+ * hns_mdio_bus_name - get mdio bus name
+ * @name: mdio bus name
+ * @np: mdio device node pointer
+ */
+static void hns_mdio_bus_name(char *name, struct device_node *np)
+{
+ const u32 *addr;
+ u64 taddr = OF_BAD_ADDR;
+
+ addr = of_get_address(np, 0, NULL, NULL);
+ if (addr)
+ taddr = of_translate_address(np, addr);
+
+ snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+ (unsigned long long)taddr);
+}
+
+/**
+ * hns_mdio_probe - probe mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct hns_mdio_device *mdio_dev;
+ struct mii_bus *new_bus;
+ struct resource *res;
+ int ret;
+
+ if (!pdev) {
+ dev_err(NULL, "pdev is NULL!\n");
+ return -ENODEV;
+ }
+ np = pdev->dev.of_node;
+ mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
+ if (!mdio_dev)
+ return -ENOMEM;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus) {
+ dev_err(&pdev->dev, "mdiobus_alloc fail!\n");
+ return -ENOMEM;
+ }
+
+ new_bus->name = MDIO_BUS_NAME;
+ new_bus->read = hns_mdio_read;
+ new_bus->write = hns_mdio_write;
+ new_bus->reset = hns_mdio_reset;
+ new_bus->priv = mdio_dev;
+ hns_mdio_bus_name(new_bus->id, np);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mdio_dev->vbase))
+ return PTR_ERR(mdio_dev->vbase);
+
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0));
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
+ mdio_dev->subctrl_vbase = NULL;
+ }
+ new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR,
+ sizeof(int), GFP_KERNEL);
+ if (!new_bus->irq)
+ return -ENOMEM;
+
+ new_bus->parent = &pdev->dev;
+ platform_set_drvdata(pdev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mdio_remove - remove mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus;
+
+ bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id hns_mdio_match[] = {
+ {.compatible = "hisilicon,mdio"},
+ {.compatible = "hisilicon,hns-mdio"},
+ {}
+};
+
+static struct platform_driver hns_mdio_driver = {
+ .probe = hns_mdio_probe,
+ .remove = hns_mdio_remove,
+ .driver = {
+ .name = MDIO_DRV_NAME,
+ .of_match_table = hns_mdio_match,
+ },
+};
+
+module_platform_driver(hns_mdio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Hisilicon HNS MDIO driver");
+MODULE_ALIAS("platform:" MDIO_DRV_NAME);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b60a34d982a9..5d7db6c01c46 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2204,7 +2204,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
dev->cell_index, dev->ofdev->dev.of_node->full_name);
- info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
static const struct ethtool_ops emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 28df37420da9..93ae11494810 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -181,7 +181,7 @@ struct emac_instance {
struct mal_commac commac;
/* PHY infos */
- u32 phy_mode;
+ int phy_mode;
u32 phy_map;
u32 phy_address;
u32 phy_feat_exc;
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
u32 index;
};
-#define EMAC_ETHTOOL_REGS_VER 0
-#define EMAC4_ETHTOOL_REGS_VER 1
-#define EMAC4SYNC_ETHTOOL_REGS_VER 2
+#define EMAC_ETHTOOL_REGS_VER 3
+#define EMAC4_ETHTOOL_REGS_VER 4
+#define EMAC4SYNC_ETHTOOL_REGS_VER 5
#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 29bbb628d712..7af870a3c549 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
+static bool old_large_send __read_mostly;
+module_param(old_large_send, bool, S_IRUGO);
+MODULE_PARM_DESC(old_large_send,
+ "Use old large send method on firmware that supports the new method");
+
struct ibmveth_stat {
char name[ETH_GSTRING_LEN];
int offset;
@@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = {
{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
- { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
+ { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
+ { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};
/* simple methods of getting data from the current rxq entry */
@@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
return rc1 ? rc1 : rc2;
}
+static int ibmveth_set_tso(struct net_device *dev, u32 data)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+ unsigned long set_attr, clr_attr, ret_attr;
+ long ret1, ret2;
+ int rc1 = 0, rc2 = 0;
+ int restart = 0;
+
+ if (netif_running(dev)) {
+ restart = 1;
+ adapter->pool_config = 1;
+ ibmveth_close(dev);
+ adapter->pool_config = 0;
+ }
+
+ set_attr = 0;
+ clr_attr = 0;
+
+ if (data)
+ set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+ else
+ clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+
+ ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+ !old_large_send) {
+ ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ set_attr, &ret_attr);
+
+ if (ret2 != H_SUCCESS) {
+ netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
+ data, ret2);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ rc1 = -EIO;
+
+ } else {
+ adapter->fw_large_send_support = data;
+ adapter->large_send = data;
+ }
+ } else {
+ /* Older firmware version of large send offload does not
+ * support tcp6/ipv6
+ */
+ if (data == 1) {
+ dev->features &= ~NETIF_F_TSO6;
+ netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+ }
+ adapter->large_send = data;
+ }
+
+ if (restart)
+ rc2 = ibmveth_open(dev);
+
+ return rc1 ? rc1 : rc2;
+}
+
static int ibmveth_set_features(struct net_device *dev,
netdev_features_t features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
- int rc;
- netdev_features_t changed = features ^ dev->features;
-
- if (features & NETIF_F_TSO & changed)
- netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+ int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
+ int rc1 = 0, rc2 = 0;
- if (rx_csum == adapter->rx_csum)
- return 0;
+ if (rx_csum != adapter->rx_csum) {
+ rc1 = ibmveth_set_csum_offload(dev, rx_csum);
+ if (rc1 && !adapter->rx_csum)
+ dev->features =
+ features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ }
- rc = ibmveth_set_csum_offload(dev, rx_csum);
- if (rc && !adapter->rx_csum)
- dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ if (large_send != adapter->large_send) {
+ rc2 = ibmveth_set_tso(dev, large_send);
+ if (rc2 && !adapter->large_send)
+ dev->features =
+ features & ~(NETIF_F_TSO | NETIF_F_TSO6);
+ }
- return rc;
+ return rc1 ? rc1 : rc2;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs)
+ union ibmveth_buf_desc *descs, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
descs[0].desc, descs[1].desc,
descs[2].desc, descs[3].desc,
descs[4].desc, descs[5].desc,
- correlator, &correlator);
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
int last, i;
int force_bounce = 0;
dma_addr_t dma_addr;
+ unsigned long mss = 0;
/*
* veth handles a maximum of 6 segments including the header, so
@@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags = IBMVETH_BUF_VALID;
+ if (skb_is_gso(skb) && adapter->fw_large_send_support)
+ desc_flags |= IBMVETH_BUF_LRG_SND;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
unsigned char *buf = skb_transport_header(skb) +
skb->csum_offset;
@@ -1007,7 +1084,7 @@ retry_bounce:
descs[0].fields.flags_len = desc_flags | skb->len;
descs[0].fields.address = adapter->bounce_buffer_dma;
- if (ibmveth_send(adapter, descs)) {
+ if (ibmveth_send(adapter, descs, 0)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1041,16 +1118,23 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
- if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
- /* Put -1 in the IP checksum to tell phyp it
- * is a largesend packet and put the mss in the TCP checksum.
- */
- ip_hdr(skb)->check = 0xffff;
- tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
- adapter->tx_large_packets++;
+ if (skb_is_gso(skb)) {
+ if (adapter->fw_large_send_support) {
+ mss = (unsigned long)skb_shinfo(skb)->gso_size;
+ adapter->tx_large_packets++;
+ } else if (!skb_is_gso_v6(skb)) {
+ /* Put -1 in the IP checksum to tell phyp it
+ * is a largesend packet. Put the mss in
+ * the TCP checksum.
+ */
+ ip_hdr(skb)->check = 0xffff;
+ tcp_hdr(skb)->check =
+ cpu_to_be16(skb_shinfo(skb)->gso_size);
+ adapter->tx_large_packets++;
+ }
}
- if (ibmveth_send(adapter, descs)) {
+ if (ibmveth_send(adapter, descs, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
unsigned int *mcastFilterSize_p;
+ long ret;
+ unsigned long ret_attr;
dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
dev->unit_address);
@@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
netdev->features |= netdev->hw_features;
- /* TSO is disabled by default */
- netdev->hw_features |= NETIF_F_TSO;
+ ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+ /* If running older firmware, TSO should not be enabled by default */
+ if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+ !old_large_send) {
+ netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ netdev->features |= netdev->hw_features;
+ } else {
+ netdev->hw_features |= NETIF_F_TSO;
+ }
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 41dedb1fb2ae..4eade67fe30c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -40,6 +40,8 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
+#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL
@@ -59,13 +61,20 @@
static inline long h_send_logical_lan(unsigned long unit_address,
unsigned long desc1, unsigned long desc2, unsigned long desc3,
unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out)
+ unsigned long corellator_in, unsigned long *corellator_out,
+ unsigned long mss, unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
- desc2, desc3, desc4, desc5, desc6, corellator_in);
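+ /* firmware that advertises large-send support accepts the mss as
+ * an extra hcall argument; older firmware must not be passed it
+ */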
+ if (large_send_support)
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+ desc1, desc2, desc3, desc4, desc5, desc6,
+ corellator_in, mss);
+ else
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+ desc1, desc2, desc3, desc4, desc5, desc6,
+ corellator_in);
*corellator_out = retbuf[0];
@@ -147,11 +156,13 @@ struct ibmveth_adapter {
struct ibmveth_rx_q rx_queue;
int pool_config;
int rx_csum;
+ int large_send;
void *bounce_buffer;
dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
/* adapter specific stats */
u64 replenish_task_cycles;
u64 replenish_no_mem;
@@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields {
#endif
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
+#define IBMVETH_BUF_LRG_SND 0x04000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index d2657a412768..068789e694c9 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1770,8 +1770,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
dma_addr = pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr))
+ if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
return -ENOMEM;
+ }
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
@@ -2967,6 +2970,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->params.cbs.max * sizeof(struct cb),
sizeof(u32),
0);
+ if (!nic->cbs_pool) {
+ netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
+ err = -ENOMEM;
+ goto err_out_pool;
+ }
netif_info(nic, probe, nic->netdev,
"addr 0x%llx, irq %d, MAC addr %pM\n",
(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
@@ -2974,6 +2982,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_out_pool:
+ unregister_netdev(netdev);
err_out_free:
e100_free(nic);
err_out_iounmap:
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 4270ad2d4ddf..83e557c7f279 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -559,8 +559,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = e1000_get_regs_len(netdev);
- drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
static void e1000_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 45c8c864104e..b1af0d613caa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3900,10 +3900,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return E1000_SUCCESS;
}
- /* If eeprom is not yet detected, do so now */
- if (eeprom->word_size == 0)
- e1000_init_eeprom_params(hw);
-
/* A check for invalid values: offset too large, too many words, and
* not enough words.
*/
@@ -4074,10 +4070,6 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return E1000_SUCCESS;
}
- /* If eeprom is not yet detected, do so now */
- if (eeprom->word_size == 0)
- e1000_init_eeprom_params(hw);
-
/* A check for invalid values: offset too large, too many words, and
* not enough words.
*/
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 74dc15055971..fd7be860c201 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3820,7 +3820,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
if (work_done < budget) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index ad6daa656d3e..6cab1f30d41e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -648,8 +648,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = e1000_get_regs_len(netdev);
- drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
static void e1000_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 26459853c6be..34c551e322eb 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -106,14 +106,14 @@
#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
-#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
-
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 89d788d8f263..0a854a47d31a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -48,7 +48,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -1737,12 +1737,6 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
- writel(0, rx_ring->head);
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_rdt_wa(rx_ring, 0);
- else
- writel(0, rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2447,12 +2441,6 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- writel(0, tx_ring->head);
- if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- e1000e_update_tdt_wa(tx_ring, 0);
- else
- writel(0, tx_ring->tail);
}
/**
@@ -2705,7 +2693,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
if (work_done < weight) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val);
@@ -2954,6 +2942,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
+ writel(0, tx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(tx_ring, 0);
+ else
+ writel(0, tx_ring->tail);
+
/* Set the Tx Interrupt Delay register */
ew32(TIDV, adapter->tx_int_delay);
/* Tx irq moderation */
@@ -3275,6 +3269,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
+ writel(0, rx_ring->head);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(rx_ring, 0);
+ else
+ writel(0, rx_ring->tail);
+
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
if (adapter->netdev->features & NETIF_F_RXCSUM)
@@ -4280,18 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
+ u32 systimel_1, systimel_2, systimeh;
cycle_t systim, systim_next;
- /* SYSTIMH latching upon SYSTIML read does not work well. To fix that
- * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
- * to occur between reads, so if we read a vale close to overflow, we
- * wait for overflow to occur and read both registers when its safe.
+ /* SYSTIMH latching upon SYSTIML read does not work well.
+ * This means that if SYSTIML overflows after we read it but before
+ * we read SYSTIMH, the value of SYSTIMH has been incremented and we
+ * will experience a huge non linear increment in the systime value
+ * to fix that we test for overflow and if true, we re-read systime.
*/
- u32 systim_overflow_latch_fix = 0x3FFFFFFF;
-
- do {
- systim = (cycle_t)er32(SYSTIML);
- } while (systim > systim_overflow_latch_fix);
- systim |= (cycle_t)er32(SYSTIMH) << 32;
+ systimel_1 = er32(SYSTIML);
+ systimeh = er32(SYSTIMH);
+ systimel_2 = er32(SYSTIML);
+ /* Check for overflow. If there was no overflow, use the values */
+ if (systimel_1 < systimel_2) {
+ systim = (cycle_t)systimel_1;
+ systim |= (cycle_t)systimeh << 32;
+ } else {
+ /* There was an overflow, read again SYSTIMH, and use
+ * systimel_2
+ */
+ systimeh = er32(SYSTIMH);
+ systim = (cycle_t)systimel_2;
+ systim |= (cycle_t)systimeh << 32;
+ }
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
u64 incvalue, time_delta, rem, temp;
@@ -4588,6 +4599,7 @@ static int e1000_open(struct net_device *netdev)
return 0;
err_req_irq:
+ pm_qos_remove_request(&adapter->pm_qos_req);
e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -6316,6 +6328,33 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
return retval;
}
+ /* Ensure that the appropriate bits are set in LPI_CTRL
+ * for EEE in Sx
+ */
+ if ((hw->phy.type >= e1000_phy_i217) &&
+ adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
+ u16 lpi_ctrl = 0;
+
+ retval = hw->phy.ops.acquire(hw);
+ if (!retval) {
+ retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
+ &lpi_ctrl);
+ if (!retval) {
+ if (adapter->eee_advert &
+ hw->dev_spec.ich8lan.eee_lp_ability &
+ I82579_EEE_100_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ if (adapter->eee_advert &
+ hw->dev_spec.ich8lan.eee_lp_ability &
+ I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
+ lpi_ctrl);
+ }
+ }
+ hw->phy.ops.release(hw);
+ }
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -6465,7 +6504,7 @@ static int __e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
aspm_disable_flag |= PCIE_LINK_STATE_L1;
if (aspm_disable_flag)
- e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
pci_set_master(pdev);
@@ -6743,7 +6782,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
aspm_disable_flag |= PCIE_LINK_STATE_L1;
if (aspm_disable_flag)
- e1000e_disable_aspm(pdev, aspm_disable_flag);
+ e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -6913,6 +6952,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
#endif
.ndo_set_features = e1000_set_features,
.ndo_fix_features = e1000_fix_features,
+ .ndo_features_check = passthru_features_check,
};
/**
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index b24e5fee17f2..1d5e0b77062a 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,8 +38,8 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
-#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
-#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
@@ -125,7 +125,6 @@
(0x054E4 + ((_i - 16) * 8)))
#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index c8c8c5baefda..14440200499b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -101,12 +101,19 @@ struct fm10k_tx_queue_stats {
u64 csum_err;
u64 tx_busy;
u64 tx_done_old;
+ u64 csum_good;
};
struct fm10k_rx_queue_stats {
u64 alloc_failed;
u64 csum_err;
u64 errors;
+ u64 csum_good;
+ u64 switch_errors;
+ u64 drops;
+ u64 pp_errors;
+ u64 link_errors;
+ u64 length_errors;
};
struct fm10k_ring {
@@ -251,6 +258,7 @@ struct fm10k_intfc {
#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2)
#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3)
#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4)
+#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5)
int xcast_mode;
/* Tx fast path data */
@@ -277,6 +285,17 @@ struct fm10k_intfc {
u64 rx_drops_nic;
u64 rx_overrun_pf;
u64 rx_overrun_vf;
+
+ /* Debug Statistics */
+ u64 hw_sm_mbx_full;
+ u64 hw_csum_tx_good;
+ u64 hw_csum_rx_good;
+ u64 rx_switch_errors;
+ u64 rx_drops;
+ u64 rx_pp_errors;
+ u64 rx_link_errors;
+ u64 rx_length_errors;
+
u32 tx_timeout_count;
/* RX */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index f45b4d71adb8..5304bc1fbecd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -37,7 +37,8 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos)
}
static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
- void __always_unused *v, loff_t *pos)
+ void __always_unused *v,
+ loff_t *pos)
{
struct fm10k_ring *ring = s->private;
@@ -45,7 +46,7 @@ static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
}
static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s,
- __always_unused void *v)
+ void __always_unused *v)
{
/* Do nothing. */
}
@@ -175,7 +176,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
return;
/* Generate a folder for each q_vector */
- sprintf(name, "q_vector.%03d", q_vector->v_idx);
+ snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
if (!q_vector->dbg_q_vector)
@@ -185,7 +186,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
for (i = 0; i < q_vector->tx.count; i++) {
struct fm10k_ring *ring = &q_vector->tx.ring[i];
- sprintf(name, "tx_ring.%03d", ring->queue_index);
+ snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index);
debugfs_create_file(name, 0600,
q_vector->dbg_q_vector, ring,
@@ -196,7 +197,7 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
for (i = 0; i < q_vector->rx.count; i++) {
struct fm10k_ring *ring = &q_vector->rx.ring[i];
- sprintf(name, "rx_ring.%03d", ring->queue_index);
+ snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index);
debugfs_create_file(name, 0600,
q_vector->dbg_q_vector, ring,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index c6dc9683429e..2ce0eba5e040 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -76,19 +76,22 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
- FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy),
- FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped),
- FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages),
- FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords),
- FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages),
- FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords),
- FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err),
-
FM10K_STAT("tx_hang_count", tx_timeout_count),
FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
};
+static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
+ FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
+ FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
+ FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
+ FM10K_STAT("rx_switch_errors", rx_switch_errors),
+ FM10K_STAT("rx_drops", rx_drops),
+ FM10K_STAT("rx_pp_errors", rx_pp_errors),
+ FM10K_STAT("rx_link_errors", rx_link_errors),
+ FM10K_STAT("rx_length_errors", rx_length_errors),
+};
+
static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
FM10K_STAT("timeout", stats.timeout.count),
FM10K_STAT("ur", stats.ur.count),
@@ -100,14 +103,33 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};
+#define FM10K_MBX_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \
+ .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
+ FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
+ FM10K_MBX_STAT("mbx_tx_oversized", tx_dropped),
+ FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
+ FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
+ FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
+ FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
+ FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
+};
+
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
+#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
+#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
#define FM10K_QUEUE_STATS_LEN(_n) \
( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
- FM10K_NETDEV_STATS_LEN)
+ FM10K_NETDEV_STATS_LEN + \
+ FM10K_MBX_STATS_LEN)
static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
"Mailbox test (on/offline)"
@@ -120,47 +142,97 @@ enum fm10k_self_test_types {
FM10K_TEST_MAX = FM10K_TEST_LEN
};
-static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+enum {
+ FM10K_PRV_FLAG_DEBUG_STATS,
+ FM10K_PRV_FLAG_LEN,
+};
+
+static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "debug-statistics",
+};
+
+static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
char *p = (char *)data;
unsigned int i;
+ unsigned int j;
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(data, *fm10k_gstrings_test,
- FM10K_TEST_LEN * ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
+ for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+ for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
+ }
+
+ for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ if (interface->hw.mac.type != fm10k_mac_vf) {
+ for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
+ memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ }
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
+ for (i = 0; i < iov_data->num_vfs; i++) {
+ for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
+ snprintf(p,
+ ETH_GSTRING_LEN,
+ "vf_%u_%s", i,
+ fm10k_gstrings_mbx_stats[j].stat_string);
p += ETH_GSTRING_LEN;
}
}
+ }
- for (i = 0; i < interface->hw.mac.max_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+static void fm10k_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ char *p = (char *)data;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *fm10k_gstrings_test,
+ FM10K_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ fm10k_get_stat_strings(dev, data);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(p, fm10k_prv_flags,
+ FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
break;
}
}
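
Editor's note (sketch, not driver code): ethtool expects the ETH_SS_STATS names as a flat array of fixed ETH_GSTRING_LEN slots, in exactly the order fm10k_get_ethtool_stats() later emits the u64 values. A minimal illustration of that packing contract; "names" and "count" are hypothetical:

	/* Pack names into fixed-size slots; ETH_GSTRING_LEN comes from
	 * <linux/ethtool.h>. Order must match the value-emission order.
	 */
	static void example_pack_strings(u8 *data, const char * const *names,
					 unsigned int count)
	{
		char *p = (char *)data;
		unsigned int i;

		for (i = 0; i < count; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s", names[i]);
			p += ETH_GSTRING_LEN;
		}
	}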
@@ -168,6 +240,7 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
struct fm10k_intfc *interface = netdev_priv(dev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
int stats_len = FM10K_STATIC_STATS_LEN;
@@ -180,7 +253,16 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
if (hw->mac.type != fm10k_mac_vf)
stats_len += FM10K_PF_STATS_LEN;
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+ stats_len += FM10K_DEBUG_STATS_LEN;
+
+ if (iov_data)
+ stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs;
+ }
+
return stats_len;
+ case ETH_SS_PRIV_FLAGS:
+ return FM10K_PRV_FLAG_LEN;
default:
return -EOPNOTSUPP;
}
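
Editor's note: the ETH_SS_STATS count returned here must mirror the string table entry-for-entry. A hedged sketch of how the pieces add up; the helper is hypothetical, the macros are the ones defined in this patch:

	static int example_stats_count(bool is_pf, bool debug_on,
				       unsigned int num_vfs,
				       unsigned int max_queues)
	{
		int len = FM10K_STATIC_STATS_LEN;	/* netdev + global + mbx */

		len += FM10K_QUEUE_STATS_LEN(max_queues);
		if (is_pf)
			len += FM10K_PF_STATS_LEN;
		if (debug_on) {
			len += FM10K_DEBUG_STATS_LEN;
			len += FM10K_MBX_STATS_LEN * num_vfs; /* per-VF mailbox */
		}
		return len;
	}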
@@ -192,6 +274,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
{
const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct fm10k_iov_data *iov_data = interface->iov_data;
struct net_device_stats *net_stats = &netdev->stats;
char *p;
int i, j;
@@ -211,13 +294,47 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- if (interface->hw.mac.type != fm10k_mac_vf)
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
+ for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
+ p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset;
+ *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+
+ for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
+ p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset;
+ *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ if (interface->hw.mac.type != fm10k_mac_vf) {
for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
p = (char *)interface +
fm10k_gstrings_pf_stats[i].stat_offset;
*(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ }
+
+ if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
+ for (i = 0; i < iov_data->num_vfs; i++) {
+ struct fm10k_vf_info *vf_info;
+ vf_info = &iov_data->vf_info[i];
+
+ /* skip stats if we don't have a vf info */
+ if (!vf_info) {
+ data += FM10K_MBX_STATS_LEN;
+ continue;
+ }
+
+ for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
+ p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset;
+ *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ }
+ }
for (i = 0; i < interface->hw.mac.max_queues; i++) {
struct fm10k_ring *ring;
@@ -398,10 +515,6 @@ static void fm10k_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
strncpy(info->bus_info, pci_name(interface->pdev),
sizeof(info->bus_info) - 1);
-
- info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS);
-
- info->regdump_len = fm10k_get_regs_len(dev);
}
static void fm10k_get_pauseparam(struct net_device *dev,
@@ -881,6 +994,33 @@ static void fm10k_self_test(struct net_device *dev,
eth_test->flags |= ETH_TEST_FL_FAILED;
}
+static u32 fm10k_get_priv_flags(struct net_device *netdev)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (interface->flags & FM10K_FLAG_DEBUG_STATS)
+ priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
+
+ return priv_flags;
+}
+
+static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+
+ if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+ return -EINVAL;
+
+ if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
+ interface->flags |= FM10K_FLAG_DEBUG_STATS;
+ else
+ interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
+
+ return 0;
+}
+
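
Editor's note: with a stock ethtool binary, `ethtool --show-priv-flags <dev>` should list "debug-statistics" and `ethtool --set-priv-flags <dev> debug-statistics on` toggles it through the two handlers above. A minimal sketch of the round-trip they implement (caller-side fragment, names from this patch):

	/* get_priv_flags reports a bitmask; set_priv_flags rejects any
	 * bit at or above FM10K_PRV_FLAG_LEN before applying the rest. */
	u32 flags = fm10k_get_priv_flags(netdev);
	int err;

	flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
	err = fm10k_set_priv_flags(netdev, flags);	/* 0 on success */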
static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
@@ -1094,6 +1234,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
+ .get_priv_flags = fm10k_get_priv_flags,
+ .set_priv_flags = fm10k_set_priv_flags,
.get_rxfh_indir_size = fm10k_get_reta_size,
.get_rxfh_key_size = fm10k_get_rssrk_size,
.get_rxfh = fm10k_get_rssh,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 94571e6e790c..acfb8b1f88a7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -137,8 +137,11 @@ process_mbx:
}
/* guarantee we have free space in the SM mailbox */
- if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
+ if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
+ /* keep track of how many times this occurs */
+ interface->hw_sm_mbx_full++;
break;
+ }
/* cleanup mailbox and process received messages */
mbx->ops.process(hw, mbx);
@@ -228,9 +231,6 @@ int fm10k_iov_resume(struct pci_dev *pdev)
hw->iov.ops.set_lport(hw, vf_info, i,
FM10K_VF_FLAG_MULTI_CAPABLE);
- /* assign our default vid to the VF following reset */
- vf_info->sw_vid = hw->mac.default_vid;
-
/* mailbox is disconnected so we don't send a message */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcdc795b..e76a44cf330c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
static inline bool fm10k_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
@@ -398,6 +398,8 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
return;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ ring->rx_stats.csum_good++;
}
#define FM10K_RSS_L4_TYPES_MASK \
@@ -497,8 +499,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
if (rx_desc->w.vlan) {
u16 vid = le16_to_cpu(rx_desc->w.vlan);
- if (vid != rx_ring->vid)
+ if ((vid & VLAN_VID_MASK) != rx_ring->vid)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ else if (vid & VLAN_PRIO_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vid & VLAN_PRIO_MASK);
}
fm10k_type_trans(rx_ring, rx_desc, skb);
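
Editor's note: the masking matters because the 16-bit tag word carries more than the VID — bits 11:0 are the VLAN ID (VLAN_VID_MASK) and bits 15:13 the priority (VLAN_PRIO_MASK), both from <linux/if_vlan.h>. A worked split with illustrative values:

	u16 tci  = 0x6064;			/* PCP 3, VID 100 */
	u16 vid  = tci & VLAN_VID_MASK;		/* 0x0064 -> VID 100 */
	u16 prio = tci & VLAN_PRIO_MASK;	/* 0x6000 -> PCP 3 (>> 13) */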
@@ -553,6 +558,18 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
{
if (unlikely((fm10k_test_staterr(rx_desc,
FM10K_RXD_STATUS_RXE)))) {
+#define FM10K_TEST_RXD_BIT(rxd, bit) \
+ ((rxd)->w.csum_err & cpu_to_le16(bit))
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
+ rx_ring->rx_stats.switch_errors++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
+ rx_ring->rx_stats.drops++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
+ rx_ring->rx_stats.pp_errors++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
+ rx_ring->rx_stats.link_errors++;
+ if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
+ rx_ring->rx_stats.length_errors++;
dev_kfree_skb_any(skb);
rx_ring->rx_stats.errors++;
return true;
@@ -576,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
-static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
- struct fm10k_ring *rx_ring,
- int budget)
+static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
+ struct fm10k_ring *rx_ring,
+ int budget)
{
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
@@ -645,7 +662,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
q_vector->rx.total_packets += total_packets;
q_vector->rx.total_bytes += total_bytes;
- return total_packets < budget;
+ return total_packets;
}
#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
@@ -878,6 +895,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
/* update TX checksum flag */
first->tx_flags |= FM10K_TX_FLAGS_CSUM;
+ tx_ring->tx_stats.csum_good++;
no_csum:
/* populate Tx descriptor header size and mss */
@@ -1079,9 +1097,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_tx_buffer *first;
int tso;
u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
unsigned short f;
-#endif
u16 count = TXD_USE_COUNT(skb_headlen(skb));
/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1089,12 +1105,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* + 2 desc gap to keep tail from touching head
* otherwise try next time
*/
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
@@ -1409,7 +1422,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
struct fm10k_q_vector *q_vector =
container_of(napi, struct fm10k_q_vector, napi);
struct fm10k_ring *ring;
- int per_ring_budget;
+ int per_ring_budget, work_done = 0;
bool clean_complete = true;
fm10k_for_each_ring(ring, q_vector->tx)
@@ -1423,16 +1436,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- fm10k_for_each_ring(ring, q_vector->rx)
- clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
- per_ring_budget);
+ fm10k_for_each_ring(ring, q_vector->rx) {
+ int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+ work_done += work;
+ clean_complete &= !!(work < per_ring_budget);
+ }
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
/* all work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* re-enable the q_vector */
fm10k_qv_enable(q_vector);
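
Editor's note: returning a packet count instead of a bool is what lets napi_complete_done() feed real work totals into the kernel's NAPI accounting. A condensed sketch of the contract, assuming a generic poll routine and a hypothetical example_clean_rx() helper:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_clean_rx(napi, budget);	/* hypothetical */

		/* exhausting the budget means more work may remain: keep polling */
		if (work_done == budget)
			return budget;

		napi_complete_done(napi, work_done);
		return work_done;
	}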
@@ -1892,7 +1908,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
u32 reta, base;
/* If the netdev is initialized we have to maintain table if possible */
- if (interface->netdev->reg_state) {
+ if (interface->netdev->reg_state != NETREG_UNINITIALIZED) {
for (i = FM10K_RETA_SIZE; i--;) {
reta = interface->reta[i];
if ((((reta << 24) >> 24) < rss_i) &&
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 1a4b52637de9..af09a1b272e6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -129,8 +129,8 @@ static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo)
* fm10k_fifo_drop_all - Drop all messages in FIFO
* @fifo: pointer to FIFO
*
- * This function resets the head pointer to drop all messages in the FIFO,
- * and ensure the FIFO is empty.
+ * This function resets the head pointer to drop all messages in the FIFO and
+ * ensure the FIFO is empty.
**/
static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo)
{
@@ -899,6 +899,27 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx)
}
/**
+ * fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header
+ * @mbx: pointer to mailbox
+ *
+ * This function creates a fake disconnect header for loading into the
+ * remote mailbox header. The primary purpose is to prevent errors on
+ * immediate start up after mbx->connect.
+ **/
+static void fm10k_mbx_create_fake_disconnect_hdr(struct fm10k_mbx_info *mbx)
+{
+ u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) |
+ FM10K_MSG_HDR_FIELD_SET(mbx->tail, HEAD);
+ u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1);
+
+ mbx->mbx_lock |= FM10K_MBX_ACK;
+
+ /* load header to memory to be written */
+ mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC);
+}
+
+/**
 * fm10k_mbx_create_error_msg - Generate an error message
* @mbx: pointer to mailbox
* @err: local error encountered
@@ -1046,9 +1067,26 @@ static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw,
**/
static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx)
{
+ u16 len, head, ack;
+
/* reset our outgoing max size back to Rx limits */
mbx->max_size = mbx->rx.size - 1;
+ /* update mbx->pulled to account for tail_len and ack */
+ head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD);
+ ack = fm10k_mbx_index_len(mbx, head, mbx->tail);
+ mbx->pulled += mbx->tail_len - ack;
+
+ /* now drop any messages which have started or finished transmitting */
+ while (fm10k_fifo_head_len(&mbx->tx) && mbx->pulled) {
+ len = fm10k_fifo_head_drop(&mbx->tx);
+ mbx->tx_dropped++;
+ if (mbx->pulled >= len)
+ mbx->pulled -= len;
+ else
+ mbx->pulled = 0;
+ }
+
 /* just do a quick resync to start of message */
mbx->pushed = 0;
mbx->pulled = 0;
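
Editor's note: the ack arithmetic above leans on the mailbox's circular index space; fm10k_mbx_index_len() measures the forward distance from one index to another modulo the ring size. A hedged sketch of that distance calculation, assuming a power-of-two index space:

	/* Forward distance from 'head' to 'tail' on a circular index
	 * space of 'ring' entries (ring assumed to be a power of two). */
	static u16 example_index_len(u16 head, u16 tail, u16 ring)
	{
		return (tail - head) & (ring - 1);
	}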
@@ -1418,8 +1456,10 @@ static s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
/* Place mbx in ready to connect state */
mbx->state = FM10K_STATE_CONNECT;
+ fm10k_mbx_reset_work(mbx);
+
/* initialize header of remote mailbox */
- fm10k_mbx_create_disconnect_hdr(mbx);
+ fm10k_mbx_create_fake_disconnect_hdr(mbx);
fm10k_write_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr);
/* enable interrupt and notify other party of new message */
@@ -1725,7 +1765,7 @@ static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw,
mbx->state = FM10K_STATE_CLOSED;
mbx->remote = 0;
fm10k_mbx_reset_work(mbx);
- fm10k_mbx_update_max_size(mbx, 0);
+ fm10k_fifo_drop_all(&mbx->tx);
fm10k_write_reg(hw, mbx->mbmem_reg, 0);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 99228bf46c12..639263d5e833 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -758,6 +758,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
s32 err;
+ int i;
/* updates do not apply to VLAN 0 */
if (!vid)
@@ -775,8 +776,25 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
if (!set)
clear_bit(vid, interface->active_vlans);
- /* if default VLAN is already present do nothing */
- if (vid == hw->mac.default_vid)
+ /* disable the default VID on ring if we have an active VLAN */
+ for (i = 0; i < interface->num_rx_queues; i++) {
+ struct fm10k_ring *rx_ring = interface->rx_ring[i];
+ u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);
+
+ if (test_bit(rx_vid, interface->active_vlans))
+ rx_ring->vid |= FM10K_VLAN_CLEAR;
+ else
+ rx_ring->vid &= ~FM10K_VLAN_CLEAR;
+ }
+
+ /* Do not remove default VID related entries from VLAN and MAC tables */
+ if (!set && vid == hw->mac.default_vid)
+ return 0;
+
+ /* Do not throw an error if the interface is down. We will sync once
+ * we come up.
+ */
+ if (test_bit(__FM10K_DOWN, &interface->state))
return 0;
fm10k_mbx_lock(interface);
@@ -996,21 +1014,6 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
int xcast_mode;
u16 vid, glort;
- /* restore our address if perm_addr is set */
- if (hw->mac.type == fm10k_mac_vf) {
- if (is_valid_ether_addr(hw->mac.perm_addr)) {
- ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
- }
-
- if (hw->mac.vlan_override)
- netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- else
- netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
-
/* record glort for this interface */
glort = interface->glort;
@@ -1045,7 +1048,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
vid, true, 0);
}
- /* update xcast mode before syncronizing addresses */
+ /* update xcast mode before synchronizing addresses */
hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index ce53ff25f88d..74be792f3f1b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -170,6 +170,21 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
+ /* update hardware address for VFs if perm_addr has changed */
+ if (hw->mac.type == fm10k_mac_vf) {
+ if (is_valid_ether_addr(hw->mac.perm_addr)) {
+ ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
+ ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
+ ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
+ netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
+ }
+
+ if (hw->mac.vlan_override)
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
/* reset clock */
fm10k_ts_reset(interface);
@@ -259,8 +274,6 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
* @interface: board private structure
*
* This function will process both the upstream and downstream mailboxes.
- * It is necessary for us to hold the rtnl_lock while doing this as the
- * mailbox accesses are protected by this lock.
**/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
@@ -315,6 +328,9 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
{
struct net_device_stats *net_stats = &interface->netdev->stats;
struct fm10k_hw *hw = &interface->hw;
+ u64 hw_csum_tx_good = 0, hw_csum_rx_good = 0, rx_length_errors = 0;
+ u64 rx_switch_errors = 0, rx_drops = 0, rx_pp_errors = 0;
+ u64 rx_link_errors = 0;
u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0;
u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0;
u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0;
@@ -334,6 +350,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
tx_csum_errors += tx_ring->tx_stats.csum_err;
bytes += tx_ring->stats.bytes;
pkts += tx_ring->stats.packets;
+ hw_csum_tx_good += tx_ring->tx_stats.csum_good;
}
interface->restart_queue = restart_queue;
@@ -341,6 +358,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
net_stats->tx_bytes = bytes;
net_stats->tx_packets = pkts;
interface->tx_csum_errors = tx_csum_errors;
+ interface->hw_csum_tx_good = hw_csum_tx_good;
+
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
struct fm10k_ring *rx_ring = interface->rx_ring[i];
@@ -350,12 +369,24 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
alloc_failed += rx_ring->rx_stats.alloc_failed;
rx_csum_errors += rx_ring->rx_stats.csum_err;
rx_errors += rx_ring->rx_stats.errors;
+ hw_csum_rx_good += rx_ring->rx_stats.csum_good;
+ rx_switch_errors += rx_ring->rx_stats.switch_errors;
+ rx_drops += rx_ring->rx_stats.drops;
+ rx_pp_errors += rx_ring->rx_stats.pp_errors;
+ rx_link_errors += rx_ring->rx_stats.link_errors;
+ rx_length_errors += rx_ring->rx_stats.length_errors;
}
net_stats->rx_bytes = bytes;
net_stats->rx_packets = pkts;
interface->alloc_failed = alloc_failed;
interface->rx_csum_errors = rx_csum_errors;
+ interface->hw_csum_rx_good = hw_csum_rx_good;
+ interface->rx_switch_errors = rx_switch_errors;
+ interface->rx_drops = rx_drops;
+ interface->rx_pp_errors = rx_pp_errors;
+ interface->rx_link_errors = rx_link_errors;
+ interface->rx_length_errors = rx_length_errors;
hw->mac.ops.update_hw_stats(hw, &interface->stats);
@@ -483,7 +514,7 @@ static void fm10k_service_task(struct work_struct *work)
interface = container_of(work, struct fm10k_intfc, service_task);
- /* tasks always capable of running, but must be rtnl protected */
+ /* tasks run even when interface is down */
fm10k_mbx_subtask(interface);
fm10k_detach_subtask(interface);
fm10k_reset_subtask(interface);
@@ -663,6 +694,10 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* assign default VLAN to queue */
ring->vid = hw->mac.default_vid;
+ /* if we have an active VLAN, disable default VID */
+ if (test_bit(hw->mac.default_vid, interface->active_vlans))
+ ring->vid |= FM10K_VLAN_CLEAR;
+
/* Map interrupt */
if (ring->q_vector) {
rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
@@ -861,10 +896,12 @@ void fm10k_netpoll(struct net_device *netdev)
#endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break
-static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
+static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
struct fm10k_fault *fault)
{
struct pci_dev *pdev = interface->pdev;
+ struct fm10k_hw *hw = &interface->hw;
+ struct fm10k_iov_data *iov_data = interface->iov_data;
char *error;
switch (type) {
@@ -918,6 +955,30 @@ static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
"%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
error, fault->address, fault->specinfo,
PCI_SLOT(fault->func), PCI_FUNC(fault->func));
+
+ /* For VF faults, clear out the respective LPORT, reset the queue
+ * resources, and then reconnect to the mailbox. This allows the
+ * VF in question to resume behavior. For transient faults that are
+ * the result of non-malicious behavior this will log the fault and
+ * allow the VF to resume functionality. Obviously for malicious VFs
+ * they will be able to attempt malicious behavior again. In this
+ * case, the system administrator will need to step in and manually
+ * remove or disable the VF in question.
+ */
+ if (fault->func && iov_data) {
+ int vf = fault->func - 1;
+ struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];
+
+ hw->iov.ops.reset_lport(hw, vf_info);
+ hw->iov.ops.reset_resources(hw, vf_info);
+
+ /* reset_lport disables the VF, so re-enable it */
+ hw->iov.ops.set_lport(hw, vf_info, vf,
+ FM10K_VF_FLAG_MULTI_CAPABLE);
+
+ /* reset_resources will disconnect from the mbx */
+ vf_info->mbx.ops.connect(hw, &vf_info->mbx);
+ }
}
static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
@@ -941,7 +1002,7 @@ static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
continue;
}
- fm10k_print_fault(interface, type, &fault);
+ fm10k_handle_fault(interface, type, &fault);
}
}
@@ -1705,22 +1766,86 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
static void fm10k_slot_warn(struct fm10k_intfc *interface)
{
- struct device *dev = &interface->pdev->dev;
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
struct fm10k_hw *hw = &interface->hw;
+ int max_gts = 0, expected_gts = 0;
+
+ if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ dev_warn(&interface->pdev->dev,
+ "Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 2 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 4 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ /* 128b/130b encoding has less than 2% impact on throughput */
+ max_gts = 8 * width;
+ break;
+ default:
+ dev_warn(&interface->pdev->dev,
+ "Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ dev_info(&interface->pdev->dev,
+ "PCI Express bandwidth of %dGT/s available\n",
+ max_gts);
+ dev_info(&interface->pdev->dev,
+ "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ "Unknown"),
+ hw->bus.width,
+ (speed == PCIE_SPEED_2_5GT ? "20%" :
+ speed == PCIE_SPEED_5_0GT ? "20%" :
+ speed == PCIE_SPEED_8_0GT ? "<2%" :
+ "Unknown"),
+ (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
+ hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
+ hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
+ "Unknown"));
- if (hw->mac.ops.is_slot_appropriate(hw))
+ switch (hw->bus_caps.speed) {
+ case fm10k_bus_speed_2500:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ expected_gts = 2 * hw->bus_caps.width;
+ break;
+ case fm10k_bus_speed_5000:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ expected_gts = 4 * hw->bus_caps.width;
+ break;
+ case fm10k_bus_speed_8000:
+ /* 128b/130b encoding has less than 2% impact on throughput */
+ expected_gts = 8 * hw->bus_caps.width;
+ break;
+ default:
+ dev_warn(&interface->pdev->dev,
+ "Unable to determine expected PCI Express bandwidth.\n");
return;
+ }
- dev_warn(dev,
- "For optimal performance, a %s %s slot is recommended.\n",
- (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" :
- hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" :
- "x8"),
- (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
- hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
- "8.0GT/s"));
- dev_warn(dev,
- "A slot with more lanes and/or higher speed is suggested.\n");
+ if (max_gts < expected_gts) {
+ dev_warn(&interface->pdev->dev,
+ "This device requires %dGT/s of bandwidth for optimal performance.\n",
+ expected_gts);
+ dev_warn(&interface->pdev->dev,
+ "A %sslot with x%d lanes is suggested.\n",
+ (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
+ hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
+ hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
+ hw->bus_caps.width);
+ }
}
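
Editor's note: the per-lane multipliers encode usable throughput after line coding — 2.5 GT/s and 5.0 GT/s links use 8b/10b encoding (80% efficiency, roughly 2 and 4 GT/s of data per lane), while 8.0 GT/s uses 128b/130b (~98.5%, rounded to 8). A worked example under those assumptions:

	/* A 5.0 GT/s (Gen2) x8 link:
	 * 5.0 GT/s * 0.8 (8b/10b) = 4 GT/s usable per lane, so
	 * max_gts = 4 * 8 = 32, which must meet or exceed expected_gts
	 * for the slot warning above to stay silent. */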
/**
@@ -1739,7 +1864,6 @@ static int fm10k_probe(struct pci_dev *pdev,
{
struct net_device *netdev;
struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
int err;
err = pci_enable_device_mem(pdev);
@@ -1783,7 +1907,6 @@ static int fm10k_probe(struct pci_dev *pdev,
interface->netdev = netdev;
interface->pdev = pdev;
- hw = &interface->hw;
interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
FM10K_UC_ADDR_SIZE);
@@ -1825,24 +1948,12 @@ static int fm10k_probe(struct pci_dev *pdev,
/* Register PTP interface */
fm10k_ptp_register(interface);
- /* print bus type/speed/width info */
- dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
- (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
- hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
- hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
- "Unknown"),
- (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
- hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
- hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
- "Unknown"),
- (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
- hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
- hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
- "Unknown"));
-
/* print warning for non-optimal configurations */
fm10k_slot_warn(interface);
+ /* report MAC address for logging */
+ dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
+
/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
fm10k_iov_configure(pdev, 0);
@@ -1983,6 +2094,16 @@ static int fm10k_resume(struct pci_dev *pdev)
if (err)
return err;
+ /* assume host is not ready, to prevent race with watchdog in case we
+ * actually don't have a connection to the switch
+ */
+ interface->host_ready = false;
+ fm10k_watchdog_host_not_ready(interface);
+
+ /* clear the service task disable bit to allow service task to start */
+ clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ fm10k_service_event_schedule(interface);
+
/* restore SR-IOV interface */
fm10k_iov_resume(pdev);
@@ -2010,6 +2131,15 @@ static int fm10k_suspend(struct pci_dev *pdev,
fm10k_iov_suspend(pdev);
+ /* the watchdog tasks may read registers, which will appear like a
+ * surprise-remove event once the PCI device is disabled. This will
+ * cause us to close the netdevice, so we don't retain the open/closed
+ * state post-resume. Prevent this by disabling the service task while
+ * suspended, until we actually resume.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ cancel_work_sync(&interface->service_task);
+
rtnl_lock();
if (netif_running(netdev))
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 3ca0233b3ea2..8c0bdc4e4edd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -59,6 +59,11 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING;
+ /* verify the switch is ready for reset */
+ reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
+ if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
+ goto out;
+
 /* Initiate data path reset */
reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
@@ -72,6 +77,7 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
if (!(reg & FM10K_IP_NOTINRESET))
err = FM10K_ERR_RESET_FAILED;
+out:
return err;
}
@@ -185,19 +191,6 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
}
/**
- * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
- * @hw: pointer to hardware structure
- *
- * Looks at the PCIe bus info to confirm whether or not this slot can support
- * the necessary bandwidth for this device.
- **/
-static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
-{
- return (hw->bus.speed == hw->bus_caps.speed) &&
- (hw->bus.width == hw->bus_caps.width);
-}
-
-/**
* fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
* @hw: pointer to hardware structure
* @vid: VLAN ID to add to table
@@ -1162,6 +1155,24 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
}
/**
+ * fm10k_iov_select_vid - Select correct default VID
+ * @vf_info: pointer to VF information structure
+ * @vid: VID to correct
+ *
+ * Will report an error if VID is out of range. For VID = 0, it will return
+ * either the pf_vid or sw_vid depending on which one is set.
+ */
+static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+{
+ if (!vid)
+ return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
+ else if (vf_info->pf_vid && vid != vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ else
+ return vid;
+}
+
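
Editor's note: the return convention folds three outcomes into one signed value; callers treat a negative result as an error and otherwise adopt the returned VID. Illustrative cases, assuming the host set pf_vid = 100:

	/* fm10k_iov_select_vid(vf_info, 0)   -> 100 (default substituted)
	 * fm10k_iov_select_vid(vf_info, 100) -> 100 (matches pf_vid)
	 * fm10k_iov_select_vid(vf_info, 200) -> FM10K_ERR_PARAM (conflict)
	 */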
+/**
* fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
* @hw: Pointer to hardware structure
* @results: Pointer array to message, results[0] is pointer to message
@@ -1175,9 +1186,10 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- int err = 0;
u8 mac[ETH_ALEN];
u32 *result;
+ int err = 0;
+ bool set;
u16 vlan;
u32 vid;
@@ -1193,19 +1205,21 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
- if (!vid || (vid == FM10K_VLAN_CLEAR)) {
- if (vf_info->pf_vid)
- vid |= vf_info->pf_vid;
- else
- vid |= vf_info->sw_vid;
- } else if (vid != vf_info->pf_vid) {
+ /* verify upper 16 bits are zero */
+ if (vid >> 16)
return FM10K_ERR_PARAM;
- }
+
+ set = !(vid & FM10K_VLAN_CLEAR);
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vid);
+ if (err < 0)
+ return err;
+ else
+ vid = err;
/* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi,
- !(vid & FM10K_VLAN_CLEAR));
+ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
}
if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
@@ -1221,19 +1235,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
memcmp(mac, vf_info->mac, ETH_ALEN))
return FM10K_ERR_PARAM;
- /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
- if (!vlan || (vlan == FM10K_VLAN_CLEAR)) {
- if (vf_info->pf_vid)
- vlan |= vf_info->pf_vid;
- else
- vlan |= vf_info->sw_vid;
- } else if (vf_info->pf_vid) {
- return FM10K_ERR_PARAM;
- }
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+ else
+ vlan = err;
/* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan,
- !(vlan & FM10K_VLAN_CLEAR), 0);
+ err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
+ mac, vlan, set, 0);
}
if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
@@ -1248,19 +1261,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
return FM10K_ERR_PARAM;
- /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
- if (!vlan || (vlan == FM10K_VLAN_CLEAR)) {
- if (vf_info->pf_vid)
- vlan |= vf_info->pf_vid;
- else
- vlan |= vf_info->sw_vid;
- } else if (vf_info->pf_vid) {
- return FM10K_ERR_PARAM;
- }
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+ else
+ vlan = err;
/* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan,
- !(vlan & FM10K_VLAN_CLEAR));
+ err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
+ mac, vlan, set);
}
return err;
@@ -1849,7 +1861,6 @@ static struct fm10k_mac_ops mac_ops_pf = {
.init_hw = &fm10k_init_hw_pf,
.start_hw = &fm10k_start_hw_generic,
.stop_hw = &fm10k_stop_hw_generic,
- .is_slot_appropriate = &fm10k_is_slot_appropriate_pf,
.update_vlan = &fm10k_update_vlan_pf,
.read_mac_addr = &fm10k_read_mac_addr_pf,
.update_uc_addr = &fm10k_update_uc_addr_pf,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 2a17d82fa37d..318a212f0a78 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -521,7 +521,6 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
- bool (*is_slot_appropriate)(struct fm10k_hw *);
s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
s32 (*read_mac_addr)(struct fm10k_hw *);
s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
@@ -763,6 +762,12 @@ enum fm10k_rxdesc_xc {
#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */
#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */
+#define FM10K_RXD_ERR_SWITCH_ERROR 0x0001 /* Switch found bad packet */
+#define FM10K_RXD_ERR_NO_DESCRIPTOR 0x0002 /* No descriptor available */
+#define FM10K_RXD_ERR_PP_ERROR 0x0004 /* RAM error during processing */
+#define FM10K_RXD_ERR_SWITCH_READY 0x0008 /* Link transition mid-packet */
+#define FM10K_RXD_ERR_TOO_BIG 0x0010 /* Pkt too big for single buf */
+
struct fm10k_ftag {
__be16 swpri_type_user;
__be16 vlan;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 94f0f6a146d9..36c8b0aa08fd 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -131,19 +131,6 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
return 0;
}
-/**
- * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
- * @hw: pointer to hardware structure
- *
- * Looks at the PCIe bus info to confirm whether or not this slot can support
- * the necessary bandwidth for this device. Since the VF has no control over
- * the "slot" it is in, always indicate that the slot is appropriate.
- **/
-static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
-{
- return true;
-}
-
 /* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
@@ -552,7 +539,6 @@ static struct fm10k_mac_ops mac_ops_vf = {
.init_hw = &fm10k_init_hw_vf,
.start_hw = &fm10k_start_hw_generic,
.stop_hw = &fm10k_stop_hw_vf,
- .is_slot_appropriate = &fm10k_is_slot_appropriate_vf,
.update_vlan = &fm10k_update_vlan_vf,
.read_mac_addr = &fm10k_read_mac_addr_vf,
.update_uc_addr = &fm10k_update_uc_addr_vf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ec76c3fa3a04..4dd3e26129b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -71,7 +71,6 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x800000
#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
@@ -79,10 +78,13 @@
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+ (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
-#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf) \
+ (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
@@ -91,19 +93,26 @@
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
-#define I40E_AQ_WORK_LIMIT 32
+#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
-#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
+#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK 0xffff
+#define I40E_OEM_VER_PATCH_MASK 0xff
+#define I40E_OEM_VER_BUILD_SHIFT 8
+#define I40E_OEM_VER_SHIFT 24
 /* The values in here are decimal coded as hex as is the case in the NVM map */
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -240,7 +249,6 @@ struct i40e_pf {
struct pci_dev *pdev;
struct i40e_hw hw;
unsigned long state;
- unsigned long link_check_timeout;
struct msix_entry *msix_entries;
bool fc_autoneg_status;
@@ -289,36 +297,45 @@ struct i40e_pf {
struct work_struct service_task;
u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9)
+#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
#ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_PTP (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
+#define I40E_FLAG_PTP BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
#ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27)
#endif
-#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29)
+#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31)
+#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32)
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33)
+#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34)
+#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37)
+#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
+#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
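
Editor's note: the BIT_ULL() conversion is not cosmetic once flag positions pass 31 — `(u64)(1 << 32)` shifts a 32-bit int and is undefined behavior, whereas BIT_ULL() shifts an unsigned long long. The kernel's bitops-header definitions, reproduced for reference:

	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))
	/* so I40E_FLAG_HW_ATR_EVICT_CAPABLE == 1ULL << 32, well-defined */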
@@ -362,6 +379,7 @@ struct i40e_pf {
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
+ bool cur_promisc;
u16 instance; /* A unique number per i40e_pf instance in the system */
@@ -398,6 +416,9 @@ struct i40e_pf {
/* These are only valid in NPAR modes */
u32 npar_max_bw;
u32 npar_min_bw;
+
+ u32 ioremap_len;
+ u32 fd_inv;
};
struct i40e_mac_filter {
@@ -432,6 +453,8 @@ struct i40e_veb {
bool stat_offsets_loaded;
struct i40e_eth_stats stats;
struct i40e_eth_stats stats_offsets;
+ struct i40e_veb_tc_stats tc_stats;
+ struct i40e_veb_tc_stats tc_stats_offsets;
};
/* struct that defines a VSI, associated with a dev */
@@ -443,10 +466,12 @@ struct i40e_vsi {
u32 current_netdev_flags;
unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags;
+ /* Per VSI lock to protect elements/list (MAC filter) */
+ spinlock_t mac_filter_list_lock;
struct list_head mac_filter_list;
/* VSI stats */
@@ -461,6 +486,7 @@ struct i40e_vsi {
#endif
u32 tx_restart;
u32 tx_busy;
+ u64 tx_linearize;
u32 rx_buf_failed;
u32 rx_page_failed;
@@ -476,6 +502,7 @@ struct i40e_vsi {
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
+ u16 int_rate_limit; /* value in usecs */
u16 rss_table_size;
u16 rss_size;
@@ -521,6 +548,7 @@ struct i40e_vsi {
u16 idx; /* index in pf->vsi[] */
u16 veb_idx; /* index of VEB parent */
struct kobject *kobj; /* sysfs object */
+ bool current_isup; /* Sync 'link up' logging */
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -550,6 +578,9 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
+ bool arm_wb_state;
+#define ITR_COUNTDOWN_START 100
+ u8 itr_countdown; /* when 0 should adjust ITR */
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -559,22 +590,29 @@ struct i40e_device {
};
/**
- * i40e_fw_version_str - format the FW and NVM version strings
+ * i40e_nvm_version_str - format the NVM version string
* @hw: ptr to the hardware info
**/
-static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
{
static char buf[32];
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+ build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
+ & I40E_OEM_VER_BUILD_MASK);
+ patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
snprintf(buf, sizeof(buf),
- "f%d.%d.%05d a%d.%d n%x.%02x e%x",
- hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
- hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ "%x.%02x 0x%x %d.%d.%d",
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
- (hw->nvm.eetrack & 0xffffff));
+ hw->nvm.eetrack, ver, build, patch);
return buf;
}
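
Editor's note: the oem_ver word packs three fields — bits 31:24 major version, 23:8 build, 7:0 patch — per the masks added in i40e.h above. A worked decode:

	/* oem_ver = 0x01234567:
	 *   ver   = 0x01234567 >> 24           = 0x01   (1)
	 *   build = (0x01234567 >> 8) & 0xffff = 0x2345 (9029)
	 *   patch = 0x01234567 & 0xff          = 0x67   (103)
	 * so the trailing "%d.%d.%d" renders as "1.9029.103".
	 */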
@@ -653,7 +691,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
bool is_vf, bool is_netdev);
void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev);
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi);
@@ -686,7 +724,24 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular HW interrupt vector, without base_vector
+ **/
+static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
+ /* skip the flush */
+}
+
void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
@@ -725,7 +780,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-int i40e_init_pf_fcoe(struct i40e_pf *pf);
+void i40e_init_pf_fcoe(struct i40e_pf *pf);
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
@@ -757,4 +812,5 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 3e0d20037675..0ff8f01e57ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
- hw->aq.asq.count = hw->aq.num_asq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
init_adminq_free_rings:
@@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
- hw->aq.arq.count = hw->aq.num_arq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
init_adminq_free_rings:
@@ -482,8 +482,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.asq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
@@ -492,16 +496,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.asq_mutex);
-
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_asq_bufs(hw);
+shutdown_asq_out:
mutex_unlock(&hw->aq.asq_mutex);
-
return ret_code;
}
@@ -515,8 +516,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.arq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
@@ -525,16 +530,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.arq_mutex);
-
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_arq_bufs(hw);
+shutdown_arq_out:
mutex_unlock(&hw->aq.arq_mutex);
-
return ret_code;
}
@@ -551,8 +553,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
**/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
+ i40e_status ret_code;
int retry = 0;
/* verify input for valid configuration */
@@ -611,6 +614,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@ -657,6 +666,9 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
/* destroy the locks */
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
return ret_code;
}
@@ -678,8 +690,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "%s: ntc %d head %d.\n", __func__, ntc,
- rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
@@ -742,19 +753,23 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
u16 retval = 0;
u32 val = 0;
- val = rd32(hw, hw->aq.asq.head);
- if (val >= hw->aq.num_asq_entries) {
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: head overrun at %d\n", val);
+ "AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
- if (hw->aq.asq.count == 0) {
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Admin queue not initialized.\n");
+ "AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -779,8 +794,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
desc->flags &= ~cpu_to_le16(details->flags_dis);
desc->flags |= cpu_to_le16(details->flags_ena);
- mutex_lock(&hw->aq.asq_mutex);
-
if (buff_size > hw->aq.asq_buf_size) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -889,6 +902,10 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ *details->wb_desc = *desc_on_ring;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -900,7 +917,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
return status;
}
@@ -946,6 +962,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
@@ -1007,6 +1030,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
if (i40e_is_nvm_update_op(&e->desc)) {
@@ -1014,6 +1039,19 @@ clean_arq_element_out:
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = false;
}
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
}
return ret_code;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 28e519a50de4..12fbbddea299 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -69,6 +69,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
+ struct i40e_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -108,9 +109,10 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code; can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
- if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
+
return aq_to_posix[aq_rc];
}
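
Editor's note: a hedged usage sketch of the conversion — once an AdminQ command completes, callers can combine the handler status (the i40e_status returned by i40e_asq_send_command()) with the firmware return code kept in asq_last_status to produce an errno:

	int err = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);

	if (err)
		return err;	/* e.g. -EAGAIN on I40E_ERR_ADMIN_QUEUE_TIMEOUT */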
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 929e3d72a01e..6584b6cd73fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_MINOR 0x0004
struct i40e_aq_desc {
__le16 flags;
@@ -132,12 +132,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -262,7 +257,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -274,8 +272,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -509,7 +505,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -532,7 +529,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -826,8 +825,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1068,6 +1071,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -1718,11 +1722,13 @@ struct i40e_aqc_get_link_status {
u8 phy_type; /* i40e_aq_phy_type */
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
#define I40E_AQ_LINK_FAULT 0x02
#define I40E_AQ_LINK_FAULT_TX 0x04
#define I40E_AQ_LINK_FAULT_RX 0x08
#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
#define I40E_AQ_MEDIA_AVAILABLE 0x40
#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
@@ -2058,12 +2064,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;
@@ -2071,9 +2093,9 @@ struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved2;
u8 oper_tc_bw[8];
u8 oper_pfc_en;
- u8 reserved3;
+ u8 reserved3[2];
__le16 oper_app_prio;
- u8 reserved4;
+ u8 reserved4[2];
__le16 tlv_status;
};
@@ -2110,6 +2132,13 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
struct i40e_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
__le16 length;
@@ -2177,6 +2206,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
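
(Editorial note: the I40E_CHECK_CMD_LENGTH()/I40E_CHECK_STRUCT_LEN() invocations after each new struct are compile-time size assertions — direct AQ commands must fit the 16-byte parameter area of struct i40e_aq_desc, and indirect buffers like the 0x34-byte key data must match the wire format exactly. The macro bodies are not part of this hunk; a sketch of the usual divide-by-zero trick such checks rely on, assumed to match the convention used elsewhere in this header:)

	/* Assumed shape of the checks (definitions live elsewhere in this
	 * header): dividing by zero when the size mismatches makes the
	 * build fail, so a mis-sized struct can never reach the firmware.
	 */
	#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
		{ i40e_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
	#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)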
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0bae22da014d..2d74c6e4d7b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -51,9 +51,20 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -72,6 +83,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
}
/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
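(Editorial note: together, i40e_stat_str() and i40e_aq_str() let call sites log a failure with both the driver status and the firmware's own return code as names rather than bare integers; the trailing snprintf() fallback keeps codes added by newer firmware printable. A typical call site would look like this — a sketch, with the pf and vsi_seid names assumed:)

	ret = i40e_aq_set_rss_key(hw, vsi_seid, &key_data);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "set RSS key failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
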
+/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -114,25 +331,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
len = buf_len;
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, buf[i], buf[i + 1], buf[i + 2],
- buf[i + 3], buf[i + 4], buf[i + 5],
- buf[i + 6], buf[i + 7], buf[i + 8],
- buf[i + 9], buf[i + 10], buf[i + 11],
- buf[i + 12], buf[i + 13], buf[i + 14],
- buf[i + 15]);
+ i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
/* write whatever's left over without overrunning the buffer */
- if (i < len) {
- char d_buf[80];
- int j = 0;
-
- memset(d_buf, 0, sizeof(d_buf));
- j += sprintf(d_buf, "\t0x%04X ", i);
- while (i < len)
- j += sprintf(&d_buf[j], " %02X", buf[i++]);
- i40e_debug(hw, mask, "%s\n", d_buf);
- }
+ if (i < len)
+ i40e_debug(hw, mask, "\t0x%04X %*ph\n",
+ i, len - i, buf + i);
}
}
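
(Editorial note: the hex-dump rewrite above leans on the kernel's %ph printf extension — "%16ph" emits exactly 16 bytes as hex, and "%*ph" takes the byte count as a preceding int argument, capped at 64 bytes by the printk core — which is why the leftover-bytes case no longer needs a hand-built scratch buffer. For example:)

	u8 buf[20] = { 0xde, 0xad, 0xbe, 0xef, /* ... */ };

	pr_info("head: %16ph\n", buf);		/* bytes 0..15 */
	pr_info("tail: %*ph\n", 4, buf + 16);	/* the remaining 4 bytes */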
@@ -177,6 +380,164 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
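(Editorial note: the four public RSS helpers are thin wrappers over two internal get/set workers, so the descriptor setup lives in one place per command pair. Programming a VSI would chain them roughly like this — a sketch; the 128-entry LUT size is an assumption, since the AQ only sees the buffer length passed through to i40e_asq_send_command():)

	struct i40e_aqc_get_set_rss_key_data key = { /* 0x34 bytes of key */ };
	u8 lut[128] = {};
	i40e_status ret;

	ret = i40e_aq_set_rss_key(hw, vsi_id, &key);
	if (!ret)
		ret = i40e_aq_set_rss_lut(hw, vsi_id, true /* pf_lut */,
					  lut, sizeof(lut));
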
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
@@ -563,6 +924,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
+ case I40E_MAC_X722:
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -582,6 +944,9 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
+ if (hw->mac.type == I40E_MAC_X722)
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
status = i40e_init_nvm(hw);
return status;
}
@@ -659,7 +1024,7 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ ether_addr_copy(mac_addr, addrs.pf_lan_mac);
return status;
}
@@ -682,7 +1047,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ ether_addr_copy(mac_addr, addrs.port_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -740,7 +1105,7 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+ ether_addr_copy(mac_addr, addrs.pf_san_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -881,7 +1246,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (cnt = 0; cnt < grst_del + 2; cnt++) {
+ for (cnt = 0; cnt < grst_del + 10; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -1187,9 +1552,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
blink = false;
if (blink)
- gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
- gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break;
@@ -1241,6 +1606,9 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
+ if (report_init)
+ hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+
return status;
}
@@ -1341,14 +1709,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
}
/* Update the link info */
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status) {
/* Wait a little bit (on 40G cards it sometimes takes a really
* long time for link to come back from the atomic reset)
* and try once more
*/
msleep(1000);
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
}
if (status)
*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1859,27 +2227,54 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
/**
* i40e_get_link_status - get status of the HW network link
* @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
*
- * Returns true if link is up, false if link is down.
+ * Sets link_up to true if the link is up, false if it is down.
+ * The value of link_up is only valid when the returned status is 0.
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-bool i40e_get_link_status(struct i40e_hw *hw)
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
i40e_status status = 0;
- bool link_status = false;
if (hw->phy.get_link_info) {
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status)
- goto i40e_get_link_status_exit;
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
}
- link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+ return status;
+}
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ i40e_status status = 0;
+
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ if (status)
+ return status;
+
+ if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ status = i40e_aq_get_phy_capabilities(hw, false, false,
+ &abilities, NULL);
+ if (status)
+ return status;
-i40e_get_link_status_exit:
- return link_status;
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+ }
+
+ return status;
}
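
(Editorial note: the signature change turns i40e_get_link_status() from a bool that silently swallowed AQ failures into a status code plus an out-parameter, so callers can distinguish "the link is down" from "the query failed". The new calling convention, as a sketch:)

	bool link_up = false;
	i40e_status status;

	status = i40e_get_link_status(hw, &link_up);
	if (status)
		return status;		/* query failed; link_up is not meaningful */
	if (!link_up)
		handle_link_down();	/* hypothetical helper */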
/**
@@ -1986,6 +2381,7 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
if (floating) {
u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+
if (flags & I40E_AQC_ADD_VEB_FLOATING)
*floating = true;
else
@@ -2391,7 +2787,7 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
#define I40E_DEV_FUNC_CAP_CEM 0xF2
#define I40E_DEV_FUNC_CAP_IWARP 0x51
#define I40E_DEV_FUNC_CAP_LED 0x61
@@ -2416,6 +2812,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ u8 major_rev;
u32 i = 0;
u16 id;
@@ -2433,6 +2830,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
number = le32_to_cpu(cap->number);
logical_id = le32_to_cpu(cap->logical_id);
phys_id = le32_to_cpu(cap->phys_id);
+ major_rev = cap->major_rev;
switch (id) {
case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2507,9 +2905,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
case I40E_DEV_FUNC_CAP_MSIX_VF:
p->num_msix_vectors_vf = number;
break;
- case I40E_DEV_FUNC_CAP_MFP_MODE_1:
- if (number == 1)
- p->mfp_mode_1 = true;
+ case I40E_DEV_FUNC_CAP_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = true;
+ p->flex10_capable = true;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = true;
+ if (number & 2)
+ p->flex10_capable = true;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
break;
case I40E_DEV_FUNC_CAP_CEM:
if (number == 1)
@@ -2557,7 +2967,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* Software override ensuring FCoE is disabled if npar or mfp
* mode because it is not supported in these modes.
*/
- if (p->npar_enable || p->mfp_mode_1)
+ if (p->npar_enable || p->flex10_enable)
p->fcoe = false;
/* count the enabled ports (aka the "not disabled" ports) */
@@ -3386,7 +3796,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
- memcpy(cmd->mac, mac_addr, ETH_ALEN);
+ ether_addr_copy(cmd->mac, mac_addr);
cmd->etype = cpu_to_le16(ethtype);
cmd->flags = cpu_to_le16(flags);
@@ -3405,6 +3815,28 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
/**
+ * i40e_add_filter_to_drop_tx_flow_control_frames - filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add ethertype filter from
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 seid)
+{
+ u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+ i40e_status status;
+
+ status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ seid, 0, true, NULL,
+ NULL);
+ if (status)
+ hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
+/**
* i40e_aq_alternate_read
* @hw: pointer to the hardware structure
* @reg_addr0: address of first dword to be read
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 2547aa21b2ca..2691277c0055 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -292,6 +292,190 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
}
/**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+ I40E_CEE_PGID_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
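(Editorial note: each Priority Group octet carries two 4-bit PGIDs, so the four octets walked above cover all eight user priorities. A standalone illustration of the unpacking — the exact I40E_CEE_PGID_PRIO_* mask values are not shown in this hunk; upper/lower nibble is assumed, per the diagram in the comment:)

	/* e.g. octet 0x21: upper nibble -> priority 2*i, lower -> 2*i + 1 */
	u8 octet = 0x21;
	u8 pg_even = (octet & 0xF0) >> 4;	/* PGID 2 */
	u8 pg_odd  =  octet & 0x0F;		/* PGID 1 */
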
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcenable = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, typelength, offset = 0;
+ struct i40e_cee_app_prio *app;
+ u8 i, up, selector;
+
+ typelength = ntohs(tlv->hdr.typelen);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+
+ dcbcfg->numapps = length / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+ if (app->prio_map & BIT(up))
+ break;
+ }
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+ if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ else if (selector == I40E_CEE_APP_SEL_TCPIP)
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ else
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+
+ dcbcfg->app[i].protocolid = ntohs(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 len, tlvlen, sublen, typelength;
+ struct i40e_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u32 ouisubtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ /* Return if not CEE DCBX */
+ if (subtype != I40E_CEE_DCBX_TYPE)
+ return;
+
+ typelength = ntohs(tlv->typelength);
+ tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+ sizeof(struct i40e_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+ typelength = ntohs(sub_tlv->hdr.typelen);
+ sublen = (u16)((typelength &
+ I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ switch (subtype) {
+ case I40E_CEE_SUBTYPE_PG_CFG:
+ i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_PFC_CFG:
+ i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case I40E_CEE_SUBTYPE_APP_PRI:
+ i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+ sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
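(Editorial note: note how the walker advances — the 9-bit CEE length field counts every byte after the two typelen octets, including the operver/maxver header bytes, which is why the step is sizeof(hdr.typelen) + sublen rather than sizeof(*sub_tlv) + sublen. A worked example under that assumption, with the 9-bit type shift also assumed from the LLDP TLV layout:)

	/* A PG CFG feature TLV: operver + maxver (2) plus a 13-byte body
	 * (4 priority octets, 8 bandwidth octets, 1 maxtcs octet).
	 */
	u16 len = 2 + 13;				/* = 15 */
	u16 typelen = (I40E_CEE_SUBTYPE_PG_CFG << 9) | len;
	/* next TLV starts at (u8 *)cur + 2 + 15 bytes */
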
+/**
* i40e_parse_org_tlv
* @tlv: Organization specific TLV
* @dcbcfg: Local store to update ETS REC data
@@ -312,6 +496,9 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
case I40E_IEEE_8021QAZ_OUI:
i40e_parse_ieee_tlv(tlv, dcbcfg);
break;
+ case I40E_CEE_DCBX_OUI:
+ i40e_parse_cee_tlv(tlv, dcbcfg);
+ break;
default:
break;
}
@@ -502,15 +689,18 @@ static void i40e_cee_to_dcb_config(
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
for (i = 0; i < 4; i++) {
tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- dcbcfg->etscfg.prioritytable[i*2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_0_MASK) >>
I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = (u8)((cee_cfg->oper_prio_tc[i] &
+ I40E_CEE_PGID_PRIO_1_MASK) >>
+ I40E_CEE_PGID_PRIO_1_SHIFT);
+ dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
}
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -531,37 +721,85 @@ static void i40e_cee_to_dcb_config(
dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
- status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
- I40E_AQC_CEE_APP_STATUS_SHIFT;
+ i = 0;
+ status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+ I40E_AQC_CEE_FCOE_STATUS_SHIFT;
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
- /* Add APPs if Error is False and Oper/Sync is True */
+ /* Add FCoE APP if Error is False and Oper/Sync is True */
if (!err && sync && oper) {
- /* CEE operating configuration supports FCoE/iSCSI/FIP only */
- dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
-
/* FCoE APP */
- dcbcfg->app[0].priority =
+ dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
I40E_AQC_CEE_APP_FCOE_SHIFT;
- dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
- dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+ i++;
+ }
+ status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+ I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add iSCSI APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
/* iSCSI APP */
- dcbcfg->app[1].priority =
+ dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
I40E_AQC_CEE_APP_ISCSI_SHIFT;
- dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
- dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+ dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+ i++;
+ }
+ status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FIP APP if Error is False and Oper/Sync is True */
+ if (!err && sync && oper) {
/* FIP APP */
- dcbcfg->app[2].priority =
+ dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
I40E_AQC_CEE_APP_FIP_SHIFT;
- dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
- dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+ dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+ dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+ i++;
}
+ dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ /* IEEE mode */
+ hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+ ret = 0;
+
+out:
+ return ret;
}
/**
@@ -579,7 +817,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
/* If Firmware version < v4.33 IEEE only */
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
(hw->aq.fw_maj_ver < 4))
- goto ieee;
+ return i40e_get_ieee_dcb_config(hw);
/* If Firmware version == v4.33 use old CEE struct */
if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
@@ -588,6 +826,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
if (!ret) {
/* CEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ le16_to_cpu(cee_v1_cfg.tlv_status);
i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
&hw->local_dcbx_config);
}
@@ -597,6 +837,8 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
if (!ret) {
/* CEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ hw->local_dcbx_config.tlv_status =
+ le32_to_cpu(cee_cfg.tlv_status);
i40e_cee_to_dcb_config(&cee_cfg,
&hw->local_dcbx_config);
}
@@ -604,16 +846,14 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
/* CEE mode not enabled try querying IEEE data */
if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
- goto ieee;
- else
+ return i40e_get_ieee_dcb_config(hw);
+
+ if (ret)
goto out;
-ieee:
- /* IEEE mode */
- hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
- /* Get Local DCB Config */
+ /* Get CEE DCB Desired Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
- &hw->local_dcbx_config);
+ &hw->desired_dcbx_config);
if (ret)
goto out;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index e137e3fac8ee..92d01042c1f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -44,6 +44,15 @@
#define I40E_IEEE_SUBTYPE_PFC_CFG 11
#define I40E_IEEE_SUBTYPE_APP_PRI 12
+#define I40E_CEE_DCBX_OUI 0x001b21
+#define I40E_CEE_DCBX_TYPE 2
+
+#define I40E_CEE_SUBTYPE_CTRL 1
+#define I40E_CEE_SUBTYPE_PG_CFG 2
+#define I40E_CEE_SUBTYPE_PFC_CFG 3
+#define I40E_CEE_SUBTYPE_APP_PRI 4
+
+#define I40E_CEE_MAX_FEAT_TYPE 3
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
@@ -58,9 +67,9 @@
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
#define I40E_IEEE_ETS_CBS_SHIFT 6
-#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
#define I40E_IEEE_ETS_WILLING_SHIFT 7
-#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
@@ -79,9 +88,9 @@
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
-#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
#define I40E_IEEE_PFC_WILLING_SHIFT 7
-#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
/* Defines for IEEE APP TLV */
#define I40E_IEEE_APP_SEL_SHIFT 0
@@ -98,6 +107,36 @@ struct i40e_lldp_org_tlv {
__be32 ouisubtype;
u8 tlvinfo[1];
};
+
+struct i40e_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+ struct i40e_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
#pragma pack()
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index bd5079d5c1b6..886e667f2f1c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
/* Set up all the App TLVs if DCBx is negotiated */
for (i = 0; i < dcbxcfg->numapps; i++) {
prio = dcbxcfg->app[i].priority;
- tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+ tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
/* Add APP only if the TC is enabled for this VSI */
if (tc_map & vsi->tc_config.enabled_tc) {
@@ -236,14 +236,13 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
int v, err;
+
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
- if (err)
- dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
- __func__, pf->vsi[v]->seid,
- err, app->selector,
- app->protocolid, app->priority);
+ dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+ pf->vsi[v]->seid, err, app->selector,
+ app->protocolid, app->priority);
}
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da0faf478af0..d4b7af9a2fc8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -404,82 +404,82 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
" net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
- (long unsigned int)nstat->rx_packets,
- (long unsigned int)nstat->rx_bytes,
- (long unsigned int)nstat->rx_errors,
- (long unsigned int)nstat->rx_dropped);
+ (unsigned long int)nstat->rx_packets,
+ (unsigned long int)nstat->rx_bytes,
+ (unsigned long int)nstat->rx_errors,
+ (unsigned long int)nstat->rx_dropped);
dev_info(&pf->pdev->dev,
" net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
- (long unsigned int)nstat->tx_packets,
- (long unsigned int)nstat->tx_bytes,
- (long unsigned int)nstat->tx_errors,
- (long unsigned int)nstat->tx_dropped);
+ (unsigned long int)nstat->tx_packets,
+ (unsigned long int)nstat->tx_bytes,
+ (unsigned long int)nstat->tx_errors,
+ (unsigned long int)nstat->tx_dropped);
dev_info(&pf->pdev->dev,
" net_stats: multicast = %lu, collisions = %lu\n",
- (long unsigned int)nstat->multicast,
- (long unsigned int)nstat->collisions);
+ (unsigned long int)nstat->multicast,
+ (unsigned long int)nstat->collisions);
dev_info(&pf->pdev->dev,
" net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
- (long unsigned int)nstat->rx_length_errors,
- (long unsigned int)nstat->rx_over_errors,
- (long unsigned int)nstat->rx_crc_errors);
+ (unsigned long int)nstat->rx_length_errors,
+ (unsigned long int)nstat->rx_over_errors,
+ (unsigned long int)nstat->rx_crc_errors);
dev_info(&pf->pdev->dev,
" net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
- (long unsigned int)nstat->rx_frame_errors,
- (long unsigned int)nstat->rx_fifo_errors,
- (long unsigned int)nstat->rx_missed_errors);
+ (unsigned long int)nstat->rx_frame_errors,
+ (unsigned long int)nstat->rx_fifo_errors,
+ (unsigned long int)nstat->rx_missed_errors);
dev_info(&pf->pdev->dev,
" net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
- (long unsigned int)nstat->tx_aborted_errors,
- (long unsigned int)nstat->tx_carrier_errors,
- (long unsigned int)nstat->tx_fifo_errors);
+ (unsigned long int)nstat->tx_aborted_errors,
+ (unsigned long int)nstat->tx_carrier_errors,
+ (unsigned long int)nstat->tx_fifo_errors);
dev_info(&pf->pdev->dev,
" net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
- (long unsigned int)nstat->tx_heartbeat_errors,
- (long unsigned int)nstat->tx_window_errors);
+ (unsigned long int)nstat->tx_heartbeat_errors,
+ (unsigned long int)nstat->tx_window_errors);
dev_info(&pf->pdev->dev,
" net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
- (long unsigned int)nstat->rx_compressed,
- (long unsigned int)nstat->tx_compressed);
+ (unsigned long int)nstat->rx_compressed,
+ (unsigned long int)nstat->tx_compressed);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_packets,
- (long unsigned int)vsi->net_stats_offsets.rx_bytes,
- (long unsigned int)vsi->net_stats_offsets.rx_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+ (unsigned long int)vsi->net_stats_offsets.rx_packets,
+ (unsigned long int)vsi->net_stats_offsets.rx_bytes,
+ (unsigned long int)vsi->net_stats_offsets.rx_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_dropped);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.tx_packets,
- (long unsigned int)vsi->net_stats_offsets.tx_bytes,
- (long unsigned int)vsi->net_stats_offsets.tx_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+ (unsigned long int)vsi->net_stats_offsets.tx_packets,
+ (unsigned long int)vsi->net_stats_offsets.tx_bytes,
+ (unsigned long int)vsi->net_stats_offsets.tx_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_dropped);
dev_info(&pf->pdev->dev,
" net_stats_offsets: multicast = %lu, collisions = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.multicast,
- (long unsigned int)vsi->net_stats_offsets.collisions);
+ (unsigned long int)vsi->net_stats_offsets.multicast,
+ (unsigned long int)vsi->net_stats_offsets.collisions);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+ (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
- (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+ (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
+ (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+ (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
- (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+ (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
+ (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
- (long unsigned int)vsi->net_stats_offsets.rx_compressed,
- (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+ (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+ (unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
@@ -487,6 +487,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
if (!rx_ring)
continue;
@@ -527,7 +528,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
- (long unsigned int)rx_ring->dma);
+ (unsigned long int)rx_ring->dma);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: vsi = %p, q_vector = %p\n",
i, rx_ring->vsi,
@@ -535,6 +536,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
if (!tx_ring)
continue;
@@ -573,7 +575,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, tx_ring->size,
- (long unsigned int)tx_ring->dma);
+ (unsigned long int)tx_ring->dma);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: vsi = %p, q_vector = %p\n",
i, tx_ring->vsi,
@@ -743,6 +745,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
ring = &(hw->aq.asq);
for (i = 0; i < ring->count; i++) {
struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
dev_info(&pf->pdev->dev,
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
@@ -755,6 +758,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
ring = &(hw->aq.arq);
for (i = 0; i < ring->count; i++) {
struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
dev_info(&pf->pdev->dev,
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
@@ -949,24 +953,6 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
-/**
- * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the PF that would be altered
- * @flag: flag that needs enabling or disabling
- * @enable: Enable/disable FD SD/ATR
- **/
-static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
-{
- if (enable) {
- pf->flags |= flag;
- } else {
- pf->flags &= ~flag;
- pf->auto_disable_flags |= flag;
- }
- dev_info(&pf->pdev->dev, "requesting a PF reset\n");
- i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
-}
-
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -1038,7 +1024,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
- sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del vsi: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
@@ -1145,8 +1137,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, ma, vlan, false, false);
- ret = i40e_sync_vsi_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ ret = i40e_sync_vsi_filters(vsi, true);
if (f && !ret)
dev_info(&pf->pdev->dev,
"add macaddr: %pM vlan=%d added to VSI %d\n",
@@ -1182,8 +1176,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, ma, vlan, false, false);
- ret = i40e_sync_vsi_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ ret = i40e_sync_vsi_filters(vsi, true);
if (!ret)
dev_info(&pf->pdev->dev,
"del macaddr: %pM vlan=%d removed from VSI %d\n",
@@ -1471,23 +1467,24 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) {
dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
- i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
+
cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
@@ -1495,9 +1492,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* check the range on address */
- if (address >= I40E_MAX_REGISTER) {
- dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
- address);
+ if (address > (pf->ioremap_len - sizeof(u32))) {
+ dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+ address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
goto command_write_done;
}
@@ -1507,6 +1504,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
+
cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
@@ -1514,9 +1512,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* check the range on address */
- if (address >= I40E_MAX_REGISTER) {
- dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
- address);
+ if (address > (pf->ioremap_len - sizeof(u32))) {
+ dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+ address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
goto command_write_done;
}
wr32(&pf->hw, address, value);
@@ -1528,6 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
+
for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
@@ -1726,8 +1725,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
for (i = 0; i < packet_len; i++) {
- sscanf(&asc_packet[j], "%2hhx ",
- &raw_packet[i]);
+ cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
+ if (!cnt)
+ break;
j += 3;
}
dev_info(&pf->pdev->dev, "FD raw packet dump\n");
@@ -1745,16 +1745,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
- } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
- } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
+
ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1779,6 +1776,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
int ret;
+
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
pf->hw.mac.addr,
I40E_ETH_P_LLDP, 0,
@@ -1807,6 +1805,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
u16 llen, rlen;
int ret;
u8 *buff;
+
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1833,6 +1832,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
u16 llen, rlen;
int ret;
u8 *buff;
+
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1858,6 +1858,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
int ret;
+
ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
true, NULL);
if (ret) {
@@ -1868,6 +1869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
int ret;
+
ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
false, NULL);
if (ret) {
@@ -1969,8 +1971,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
- dev_info(&pf->pdev->dev, " fd-atr off\n");
- dev_info(&pf->pdev->dev, " fd-atr on\n");
dev_info(&pf->pdev->dev, " fd current cnt");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
@@ -2105,6 +2105,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
}
} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
int mtu;
+
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
&vsi_seid, &mtu);
if (cnt != 2) {
@@ -2220,7 +2221,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf)
create_failed:
dev_info(dev, "debugfs dir/file for %s failed\n", name);
debugfs_remove_recursive(pf->i40e_dbg_pf);
- return;
}
/**
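A recurring conversion throughout this series replaces open-coded single-bit shifts with the kernel's BIT() and BIT_ULL() helpers (defined in include/linux/bitops.h at the time of this patch). A minimal sketch of the equivalence; BIT() promotes the constant to unsigned long, avoiding the sign-extension surprises a plain (1 << n) can cause for high bit positions:

    /* paraphrased from include/linux/bitops.h */
    #define BIT(nr)     (1UL << (nr))
    #define BIT_ULL(nr) (1ULL << (nr))

    /* so the debugfs reset request above */
    i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
    /* is the typed spelling of the old open-coded form */
    i40e_do_reset_safe(pf, 1UL << __I40E_CORE_RESET_REQUESTED);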
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
new file mode 100644
index 000000000000..c601ca4a610c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
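The new i40e_devids.h gathers every supported device ID in one place, and the i40e_is_40G_device() macro covers the three pluggable-QSFP XL710 variants. A hedged usage sketch (the helper called here is hypothetical, not part of this patch):

    if (i40e_is_40G_device(hw->device_id)) {
            /* QSFP-based 40G part: only 40GBASE-SR4/CR4/LR4 link
             * modes apply, so skip 10G/1G PHY setup
             */
            setup_40g_link_modes(hw);    /* hypothetical helper */
    }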
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 56438bd579e6..f141e78d409e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
- (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
- ret_code = i40e_validate_nvm_checksum(hw, NULL);
- } else {
- ret_code = I40E_ERR_DIAG_TEST_FAILED;
- }
-
- return ret_code;
+ BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+ return i40e_validate_nvm_checksum(hw, NULL);
+ else
+ return I40E_ERR_DIAG_TEST_FAILED;
}
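The condensed EEPROM test above validates the NVM checksum only when a one-valued field in the NVM control word marks the shadow RAM as usable. A sketch of the same gate written out, assuming I40E_SR_CONTROL_WORD_1_MASK covers exactly the field starting at the named shift:

    /* field value 1 => shadow RAM valid => checksum test is meaningful */
    bool sr_valid = ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) >>
                     I40E_SR_CONTROL_WORD_1_SHIFT) == 1;
    return sr_valid ? i40e_validate_nvm_checksum(hw, NULL)
                    : I40E_ERR_DIAG_TEST_FAILED;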
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a68c65b17ea..3f385ffe420f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -87,11 +87,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+ I40E_VSI_STAT("tx_linearize", tx_linearize),
};
-static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
- struct ethtool_rxnfc *cmd);
-
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they are separate. This device supports Virtualization, and
* as such might have several netdevs supporting VMDq and FCoE going
@@ -114,7 +112,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
- I40E_PF_STAT("crc_errors", stats.crc_errors),
+ I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
@@ -148,7 +146,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+ I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+ I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
/* LPI stats */
I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -195,7 +195,14 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
/ sizeof(u64))
+#define I40E_VEB_TC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
+ FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
+ FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
+ FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
+ / sizeof(u64))
#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
+#define I40E_VEB_STATS_TOTAL (I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
@@ -220,10 +227,12 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"NPAR",
+ "LinkPolling",
+ "flow-director-atr",
+ "veb-stats",
};
-#define I40E_PRIV_FLAGS_STR_LEN \
- (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -244,7 +253,8 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
**/
static void i40e_get_settings_link_up(struct i40e_hw *hw,
struct ethtool_cmd *ecmd,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct i40e_pf *pf)
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
u32 link_speed = hw_link_info->link_speed;
@@ -263,65 +273,49 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_40GBASE_AOC:
ecmd->supported = SUPPORTED_40000baseCR4_Full;
break;
- case I40E_PHY_TYPE_40GBASE_KR4:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_40000baseKR4_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_40000baseKR4_Full;
- break;
case I40E_PHY_TYPE_40GBASE_SR4:
ecmd->supported = SUPPORTED_40000baseSR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ecmd->supported = SUPPORTED_40000baseLR4_Full;
break;
- case I40E_PHY_TYPE_20GBASE_KR2:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_20000baseKR2_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_20000baseKR2_Full;
- break;
- case I40E_PHY_TYPE_10GBASE_KX4:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseKX4_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseKX4_Full;
- break;
- case I40E_PHY_TYPE_10GBASE_KR:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseKR_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseKR_Full;
- break;
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ if (hw_link_info->module_type[2] &
+ I40E_MODULE_TYPE_1000BASE_SX ||
+ hw_link_info->module_type[2] &
+ I40E_MODULE_TYPE_1000BASE_LX) {
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ if (hw_link_info->requested_speeds &
+ I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ }
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- break;
- case I40E_PHY_TYPE_1000BASE_KX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseKX_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseKX_Full;
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
- case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
+ SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
@@ -341,12 +335,24 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
break;
case I40E_PHY_TYPE_SGMII:
ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
+ SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (pf->hw.mac.type == I40E_MAC_X722) {
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ if (hw_link_info->requested_speeds &
+ I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ }
+ break;
+ /* Backplane is set based on supported phy types in get_settings
+ * so don't set anything here but don't warn either
+ */
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_1000BASE_KX:
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -385,64 +391,73 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
* Reports link settings that can be determined when link is down
**/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd,
+ struct i40e_pf *pf)
{
- struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
/* link is down and the driver needs to fall back on
- * device ID to determine what kinds of info to display,
- * it's mostly a guess that may change when link is up
+ * supported phy types to figure out what info to display
*/
- switch (hw->device_id) {
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- case I40E_DEV_ID_QSFP_C:
- /* pluggable QSFP */
- ecmd->supported = SUPPORTED_40000baseSR4_Full |
- SUPPORTED_40000baseCR4_Full |
- SUPPORTED_40000baseLR4_Full;
- ecmd->advertising = ADVERTISED_40000baseSR4_Full |
- ADVERTISED_40000baseCR4_Full |
- ADVERTISED_40000baseLR4_Full;
- break;
- case I40E_DEV_ID_KX_B:
- /* backplane 40G */
- ecmd->supported = SUPPORTED_40000baseKR4_Full;
- ecmd->advertising = ADVERTISED_40000baseKR4_Full;
- break;
- case I40E_DEV_ID_KX_C:
- /* backplane 10G */
- ecmd->supported = SUPPORTED_10000baseKR_Full;
- ecmd->advertising = ADVERTISED_10000baseKR_Full;
- break;
- case I40E_DEV_ID_10G_BASE_T:
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- /* Figure out what has been requested */
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->supported = 0x0;
+ ecmd->advertising = 0x0;
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ if (pf->hw.mac.type == I40E_MAC_X722) {
+ ecmd->supported |= SUPPORTED_100baseT_Full;
ecmd->advertising |= ADVERTISED_100baseT_Full;
- break;
- case I40E_DEV_ID_20G_KR2:
- /* backplane 20G */
- ecmd->supported = SUPPORTED_20000baseKR2_Full;
- ecmd->advertising = ADVERTISED_20000baseKR2_Full;
- break;
- default:
- /* all the rest are 10G/1G */
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
- /* Figure out what has been requested */
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- break;
+ }
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+ ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ ecmd->supported |= SUPPORTED_40000baseCR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
}
+ if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+ !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_100baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ ecmd->supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ ecmd->advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ ecmd->supported |= SUPPORTED_40000baseSR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ ecmd->supported |= SUPPORTED_40000baseLR4_Full;
/* With no link, speed and duplex are unknown */
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -466,12 +481,43 @@ static int i40e_get_settings(struct net_device *netdev,
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
if (link_up)
- i40e_get_settings_link_up(hw, ecmd, netdev);
+ i40e_get_settings_link_up(hw, ecmd, netdev, pf);
else
- i40e_get_settings_link_down(hw, ecmd);
+ i40e_get_settings_link_down(hw, ecmd, pf);
/* Now set the settings that don't rely on link being up/down */
+ /* For backplane, supported and advertised depend only on the
+ * phy types the NVM reports as supported.
+ */
+ if (hw->device_id == I40E_DEV_ID_KX_B ||
+ hw->device_id == I40E_DEV_ID_KX_C ||
+ hw->device_id == I40E_DEV_ID_20G_KR2 ||
+ hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+ ecmd->supported = SUPPORTED_Autoneg;
+ ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ ecmd->supported |= SUPPORTED_40000baseKR4_Full;
+ ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ ecmd->supported |= SUPPORTED_20000baseKR2_Full;
+ ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+ ecmd->supported |= SUPPORTED_10000baseKR_Full;
+ ecmd->advertising |= ADVERTISED_10000baseKR_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ ecmd->supported |= SUPPORTED_10000baseKX4_Full;
+ ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
+ }
+ if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+ ecmd->supported |= SUPPORTED_1000baseKX_Full;
+ ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+ }
+ }
+
/* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -571,6 +617,14 @@ static int i40e_set_settings(struct net_device *netdev,
hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
+ if (hw->device_id == I40E_DEV_ID_KX_B ||
+ hw->device_id == I40E_DEV_ID_KX_C ||
+ hw->device_id == I40E_DEV_ID_20G_KR2 ||
+ hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+ netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+ return -EOPNOTSUPP;
+ }
+
/* get our own copy of the bits to check against */
memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
i40e_get_settings(netdev, &safe_ecmd);
@@ -607,28 +661,31 @@ static int i40e_set_settings(struct net_device *netdev,
/* Check autoneg */
if (autoneg == AUTONEG_ENABLE) {
- /* If autoneg is not supported, return error */
- if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
- netdev_info(netdev, "Autoneg not supported on this phy\n");
- return -EINVAL;
- }
/* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+ /* If autoneg is not supported, return error */
+ if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+ netdev_info(netdev, "Autoneg not supported on this phy\n");
+ return -EINVAL;
+ }
+ /* Autoneg is allowed to change */
config.abilities = abilities.abilities |
I40E_AQ_PHY_ENABLE_AN;
change = true;
}
} else {
- /* If autoneg is supported 10GBASE_T is the only phy that
- * can disable it, so otherwise return error
- */
- if (safe_ecmd.supported & SUPPORTED_Autoneg &&
- hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
- netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
- return -EINVAL;
- }
/* If autoneg is currently enabled */
if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+ /* If autoneg is supported, 10GBASE_T is the only PHY
+ * that can disable it; otherwise return an error
+ */
+ if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+ hw->phy.link_info.phy_type !=
+ I40E_PHY_TYPE_10GBASE_T) {
+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ return -EINVAL;
+ }
+ /* Autoneg is allowed to change */
config.abilities = abilities.abilities &
~I40E_AQ_PHY_ENABLE_AN;
change = true;
@@ -655,6 +712,13 @@ static int i40e_set_settings(struct net_device *netdev,
advertise & ADVERTISED_40000baseLR4_Full)
config.link_speed |= I40E_LINK_SPEED_40GB;
+ /* If speed didn't get set, set it to what it currently is.
+ * This is needed because if advertise is 0 (as it is when autoneg
+ * is disabled) then speed won't get set.
+ */
+ if (!config.link_speed)
+ config.link_speed = abilities.link_speed;
+
if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
@@ -671,7 +735,7 @@ static int i40e_set_settings(struct net_device *netdev,
/* Tell the OS link is going down, the link will go
* back up when fw says it is ready asynchronously
*/
- netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
+ i40e_print_link_message(vsi, false);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
}
@@ -679,15 +743,17 @@ static int i40e_set_settings(struct net_device *netdev,
/* make the aq call */
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
- netdev_info(netdev, "Set phy config failed with error %d.\n",
- status);
+ netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return -EAGAIN;
}
- status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status)
- netdev_info(netdev, "Updating link info failed with error %d\n",
- status);
+ netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -707,8 +773,9 @@ static int i40e_nway_reset(struct net_device *netdev)
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
- netdev_info(netdev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -812,7 +879,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
/* Tell the OS link is going down, the link will go back up when fw
* says it is ready asynchronously
*/
- netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
+ i40e_print_link_message(vsi, false);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
@@ -820,18 +887,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
- status, hw->aq.asq_last_status);
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -933,9 +1003,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val &&
- ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
- (hw->debug_mask & I40E_DEBUG_NVM)))
+ if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
dev_info(&pf->pdev->dev,
"NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -1009,7 +1077,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
/* register returns value in power of 2, 64Kbyte chunks. */
- val = (64 * 1024) * (1 << val);
+ val = (64 * 1024) * BIT(val);
return val;
}
@@ -1039,10 +1107,7 @@ static int i40e_set_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val &&
- ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
- hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
- (hw->debug_mask & I40E_DEBUG_NVM)))
+ if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
dev_info(&pf->pdev->dev,
"NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -1062,11 +1127,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, i40e_driver_version_str,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+ strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1151,6 +1215,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
+ /* the desc and bi pointers will be reallocated in the
+ * setup call
+ */
+ tx_rings[i].desc = NULL;
+ tx_rings[i].rx_bi = NULL;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
while (i) {
@@ -1181,6 +1250,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
+ /* the desc and bi pointers will be reallocated in the
+ * setup call
+ */
+ rx_rings[i].desc = NULL;
+ rx_rings[i].rx_bi = NULL;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
while (i) {
@@ -1248,8 +1322,9 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
int len = I40E_PF_STATS_LEN(netdev);
- if (pf->lan_veb != I40E_NO_VEB)
- len += I40E_VEB_STATS_LEN;
+ if ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
+ len += I40E_VEB_STATS_TOTAL;
return len;
} else {
return I40E_VSI_STATS_LEN(netdev);
@@ -1321,14 +1396,22 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
- if (pf->lan_veb != I40E_NO_VEB) {
+ if ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
struct i40e_veb *veb = pf->veb[pf->lan_veb];
+
for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
p = (char *)veb;
p += i40e_gstrings_veb_stats[j].stat_offset;
data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+ data[i++] = veb->tc_stats.tc_tx_packets[j];
+ data[i++] = veb->tc_stats.tc_tx_bytes[j];
+ data[i++] = veb->tc_stats.tc_rx_packets[j];
+ data[i++] = veb->tc_stats.tc_rx_bytes[j];
+ }
}
for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -1394,12 +1477,27 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
- if (pf->lan_veb != I40E_NO_VEB) {
+ if ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "veb.%s",
i40e_gstrings_veb_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_tx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "veb.tc_%u_rx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
}
for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "port.%s",
@@ -1462,20 +1560,11 @@ static int i40e_get_ts_info(struct net_device *dev,
else
info->phc_index = -1;
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
@@ -1484,9 +1573,18 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ i40e_status status;
+ bool link_up = false;
netif_info(pf, hw, netdev, "link test\n");
- if (i40e_get_link_status(&pf->hw))
+ status = i40e_get_link_status(&pf->hw, &link_up);
+ if (status) {
+ netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
+ *data = 1;
+ return *data;
+ }
+
+ if (link_up)
*data = 0;
else
*data = 1;
@@ -1555,8 +1653,23 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
int i;
for (i = 0; i < pf->num_alloc_vfs; i++)
- if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+ if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
+ return true;
+ return false;
+}
+
+static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
+{
+ struct i40e_vsi **vsi = pf->vsi;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!vsi[i])
+ continue;
+ if (vsi[i]->type == I40E_VSI_VMDQ2)
return true;
+ }
+
return false;
}
@@ -1573,9 +1686,9 @@ static void i40e_diag_test(struct net_device *netdev,
set_bit(__I40E_TESTING, &pf->state);
- if (i40e_active_vfs(pf)) {
+ if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
- "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+ "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
@@ -1591,11 +1704,13 @@ static void i40e_diag_test(struct net_device *netdev,
/* indicate we're in test mode */
dev_close(netdev);
else
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ /* This reset does not affect link - if it is
+ * changed to a type of reset that does affect
+ * link then the following link test would have
+ * to be moved to before the reset
+ */
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
- /* Link test performed before hardware reset
- * so autoneg doesn't interfere with test result
- */
if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1613,7 +1728,7 @@ static void i40e_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state);
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running)
dev_open(netdev);
@@ -1646,7 +1761,7 @@ static void i40e_get_wol(struct net_device *netdev,
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+ if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1679,7 +1794,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
- if (((1 << hw->port) & wol_nvm_bits))
+ if (BIT(hw->port) & wol_nvm_bits)
return -EOPNOTSUPP;
/* only magic packet is supported */
@@ -1745,6 +1860,14 @@ static int i40e_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ /* we use the _usecs_high to store/set the interrupt rate limit
+ * that the hardware supports, which almost but not quite
+ * fits the original intent of the ethtool variable;
+ * rx_coalesce_usecs_high limits total interrupts
+ * per second from both tx/rx sources.
+ */
+ ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
+ ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
return 0;
}
@@ -1763,6 +1886,17 @@ static int i40e_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+ /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
+ if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
+ netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+ return -EINVAL;
+ }
+
vector = vsi->base_vector;
if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
(ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
@@ -1776,6 +1910,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+
if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
(ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -1800,11 +1936,14 @@ static int i40e_set_coalesce(struct net_device *netdev,
vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+
q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+ wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
i40e_flush(hw);
}
@@ -2025,10 +2164,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
default:
return -EINVAL;
@@ -2037,10 +2176,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
default:
return -EINVAL;
@@ -2049,12 +2188,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -2063,12 +2202,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -2081,7 +2220,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2090,15 +2229,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
@@ -2509,7 +2648,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @indir: indirection table
* @key: hash key
*
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -2567,10 +2706,51 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
ret_flags |= pf->hw.func_caps.npar_enable ?
I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+ ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
+ I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
+ ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
+ I40E_PRIV_FLAGS_FD_ATR : 0;
+ ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
+ I40E_PRIV_FLAGS_VEB_STATS : 0;
return ret_flags;
}
+/**
+ * i40e_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
+ pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+
+ /* allow the user to control the state of the Flow
+ * Director ATR (Application Targeted Routing) feature
+ * of the driver
+ */
+ if (flags & I40E_PRIV_FLAGS_FD_ATR) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ }
+
+ if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+ pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
+ return 0;
+}
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.set_settings = i40e_set_settings,
@@ -2607,6 +2787,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
.get_priv_flags = i40e_get_priv_flags,
+ .set_priv_flags = i40e_set_priv_flags,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
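Two user-visible knobs are introduced in this file: the ethtool private flags (which replace the removed debugfs fd-atr commands) and an interrupt rate limit carried in rx_coalesce_usecs_high. A minimal sketch of the rate-limit path using only symbols visible in the hunks above; the register granularity behind INTRL_USEC_TO_REG() lives in i40e.h and is not shown here:

    /* user sets rx-usecs-high (0-235 usecs); driver stores it per VSI */
    vsi->int_rate_limit = ec->rx_coalesce_usecs_high;

    /* then programs each interrupt vector in register units */
    u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
    wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);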
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index c8b621e0e7cd..fe5d9bf3ed6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -272,10 +272,8 @@ out:
/**
* i40e_fcoe_sw_init - sets up the HW for FCoE
* @pf: pointer to PF
- *
- * Returns 0 if FCoE is supported otherwise the error code
**/
-int i40e_init_pf_fcoe(struct i40e_pf *pf)
+void i40e_init_pf_fcoe(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -286,20 +284,20 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->fcoe_hmc_filt_num = 0;
if (!pf->hw.func_caps.fcoe) {
- dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
- return 0;
+ dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n");
+ return;
}
if (!pf->hw.func_caps.dcb) {
dev_warn(&pf->pdev->dev,
"Hardware is not DCB capable not enabling FCoE.\n");
- return 0;
+ return;
}
/* enable FCoE hash filter */
val = rd32(hw, I40E_PFQF_HENA(1));
- val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
- val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+ val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+ val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
wr32(hw, I40E_PFQF_HENA(1), val);
@@ -308,10 +306,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
/* Reserve 4K DDP contexts and 20K filter size for FCoE */
- pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
- I40E_DMA_CNTX_BASE_SIZE;
+ pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+ I40E_DMA_CNTX_BASE_SIZE;
pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
- (1 << I40E_HASH_FILTER_SIZE_16K) *
+ BIT(I40E_HASH_FILTER_SIZE_16K) *
I40E_HASH_FILTER_BASE_SIZE;
/* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -326,7 +324,6 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
wr32(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n");
- return 0;
}
/**
@@ -348,7 +345,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app.protocolid == ETH_P_FCOE) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
- enabled_tc |= (1 << tc);
+ enabled_tc |= BIT(tc);
break;
}
}
@@ -1519,10 +1516,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
* same PCI function.
*/
netdev->dev_port = 1;
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
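The FCoE packet-classifier types sit above bit 31 of the 64-bit hash-enable set, which the hardware exposes as a pair of 32-bit registers; that is why the BIT() operands above subtract 32 before indexing I40E_PFQF_HENA(1). A sketch of the mapping:

    /* logical 64-bit hash-enable set assembled from the register pair */
    u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
               ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);

    /* so setting BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32) in HENA(1)
     * sets logical bit I40E_FILTER_PCTYPE_FCOE_OX of hena
     */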
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 0d49e2d15d40..a93174ddeaba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -59,9 +59,9 @@
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
- (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+ BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
- (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+ BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 9b987ccc9e82..5ebe12d56ebf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -116,6 +116,7 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one.
*
* This function:
* 1. Initializes the pd entry
@@ -129,12 +130,14 @@ exit:
**/
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index)
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
@@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
- /* allocate a 4K backing page */
- ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
- I40E_HMC_PAGED_BP_SIZE,
- I40E_HMC_PD_BP_BUF_ALIGNMENT);
- if (ret_code)
- goto exit;
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = false;
+ }
- pd_entry->bp.addr = mem;
+ pd_entry->bp.addr = *page;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
- page_desc = mem.pa | 0x1;
+ page_desc = page->pa | 0x1;
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
@@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
if (ret_code)
goto exit;
if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
- i40e_status ret_code = 0;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
- if (ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
}
/**
@@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- /* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
- if (ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
}
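i40e_add_pd_table_entry() now accepts an optional caller-owned backing page; entries created that way are flagged rsrc_pg so i40e_remove_pd_bp() leaves the memory to its owner. A sketch of both call styles (the client page variable is hypothetical):

    /* LAN HMC path keeps the old behavior: allocate a 4K page internally */
    ret = i40e_add_pd_table_entry(hw, info->hmc_info, i, NULL);

    /* a client managing its own DMA memory supplies the page instead;
     * teardown will then skip i40e_free_dma_mem() for this entry
     */
    struct i40e_dma_mem *pg = client_get_page();    /* hypothetical */
    ret = i40e_add_pd_table_entry(hw, info->hmc_info, i, pg);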
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 732a02660330..d90669211392 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 0079ad7bcd0e..79ae7beeafe5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
obj->cnt = txq_num;
obj->base = 0;
size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* update the pd table entry */
ret_code = i40e_add_pd_table_entry(hw,
info->hmc_info,
- i);
+ i, NULL);
if (ret_code) {
pd_error = true;
break;
@@ -431,9 +431,8 @@ exit_sd_error:
pd_idx1 = max(pd_idx,
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
- for (i = pd_idx1; i < pd_lmt1; i++) {
+ for (i = pd_idx1; i < pd_lmt1; i++)
i40e_remove_pd_bp(hw, info->hmc_info, i);
- }
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
case I40E_SD_TYPE_DIRECT:
@@ -763,7 +762,7 @@ static void i40e_write_byte(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u8)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
src_byte = *from;
src_byte &= mask;
@@ -804,7 +803,7 @@ static void i40e_write_word(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u16)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -854,7 +853,7 @@ static void i40e_write_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
- mask = ((u32)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
else
mask = ~(u32)0;
@@ -906,7 +905,7 @@ static void i40e_write_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
- mask = ((u64)1 << ce_info->width) - 1;
+ mask = BIT_ULL(ce_info->width) - 1;
else
mask = ~(u64)0;
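The i40e_write_{byte,word,dword,qword} hunks above all build a field mask as BIT(width) - 1, but the dword and qword variants keep their full-width special case: shifting a value by its entire bit width is undefined behavior in C, so a 64-bit mask for width == 64 cannot be built with a shift. Condensed sketch:

    /* w-bit mask, with the mandatory full-width escape hatch */
    u64 mask = (ce_info->width < 64) ? BIT_ULL(ce_info->width) - 1
                                     : ~(u64)0;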
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 48a52b35b614..b825f978d441 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 46
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -75,7 +75,12 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
/* required last entry */
{0, }
};
@@ -210,10 +216,10 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
ret = i;
pile->search_hint = i + j;
break;
- } else {
- /* not enough, so skip over it and continue looking */
- i += j;
}
+
+ /* not enough, so skip over it and continue looking */
+ i += j;
}
return ret;
@@ -296,25 +302,69 @@ static void i40e_tx_timeout(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ struct i40e_ring *tx_ring = NULL;
+ unsigned int i, hung_queue = 0;
+ u32 head, val;
pf->tx_timeout_count++;
+ /* find the stopped queue the same way the stack does */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(netdev, i);
+ trans_start = q->trans_start ? : netdev->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + netdev->watchdog_timeo))) {
+ hung_queue = i;
+ break;
+ }
+ }
+
+ if (i == netdev->num_tx_queues) {
+ netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+ } else {
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (hung_queue ==
+ vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+ }
+
if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
- pf->tx_timeout_recovery_level = 1;
+ pf->tx_timeout_recovery_level = 1; /* reset after some time */
+ else if (time_before(jiffies,
+ (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+ return; /* don't do any new action before the next timeout */
+
+ if (tx_ring) {
+ head = i40e_get_head(tx_ring);
+ /* Read interrupt register */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ val = rd32(&pf->hw,
+ I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->base_vector - 1));
+ else
+ val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+ netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ vsi->seid, hung_queue, tx_ring->next_to_clean,
+ head, tx_ring->next_to_use,
+ readl(tx_ring->tail), val);
+ }
+
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d\n",
- pf->tx_timeout_recovery_level);
+ netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+ pf->tx_timeout_recovery_level, hung_queue);
switch (pf->tx_timeout_recovery_level) {
- case 0:
- /* disable and re-enable queues for the VSI */
- if (in_interrupt()) {
- set_bit(__I40E_REINIT_REQUESTED, &pf->state);
- set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
- } else {
- i40e_vsi_reinit_locked(vsi);
- }
- break;
case 1:
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
break;
@@ -326,10 +376,9 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
- set_bit(__I40E_DOWN_REQUESTED, &pf->state);
- set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
break;
}
+
i40e_service_event_schedule(pf);
pf->tx_timeout_recovery_level++;
}
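
The reworked timeout handler above first locates the stalled queue the same way the stack's watchdog does: a queue counts as hung when transmission is stopped and its last-transmit timestamp is older than the watchdog interval. A standalone sketch of that check, with jiffies and the queue fields mocked and time_after()/netif_xmit_stopped() replaced by plain comparisons (so jiffies wraparound is ignored here):

#include <stdbool.h>
#include <stdio.h>

struct txq {
	bool stopped;			/* netif_xmit_stopped() stand-in */
	unsigned long trans_start;	/* last transmit time, in ticks */
};

/* Return the first queue that is stopped and past the watchdog window,
 * or -1 if none is hung. */
static int find_hung_queue(const struct txq *q, int nqueues,
			   unsigned long now, unsigned long watchdog_timeo)
{
	int i;

	for (i = 0; i < nqueues; i++)
		if (q[i].stopped && now > q[i].trans_start + watchdog_timeo)
			return i;
	return -1;
}

int main(void)
{
	struct txq queues[] = {
		{ false, 900 },	/* still moving */
		{ true,  100 },	/* stopped and stale -> hung */
	};

	printf("hung queue: %d\n", find_hung_queue(queues, 2, 1000, 500));
	return 0;
}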
@@ -428,6 +477,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_dropped = vsi_stats->rx_dropped;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
@@ -453,11 +503,11 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- memset(&vsi->rx_rings[i]->stats, 0 ,
+ memset(&vsi->rx_rings[i]->stats, 0,
sizeof(vsi->rx_rings[i]->stats));
- memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+ memset(&vsi->rx_rings[i]->rx_stats, 0,
sizeof(vsi->rx_rings[i]->rx_stats));
- memset(&vsi->tx_rings[i]->stats, 0 ,
+ memset(&vsi->tx_rings[i]->stats, 0,
sizeof(vsi->tx_rings[i]->stats));
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
@@ -520,7 +570,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
if (likely(new_data >= *offset))
*stat = new_data - *offset;
else
- *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat = (new_data + BIT_ULL(48)) - *offset;
*stat &= 0xFFFFFFFFFFFFULL;
}
@@ -543,7 +593,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
if (likely(new_data >= *offset))
*stat = (u32)(new_data - *offset);
else
- *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+ *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
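
Both stat helpers above handle counter wrap the same way: the hardware counters are free-running, so when a new raw reading is below the stored offset the counter must have rolled over, and one full counter period (2^48 or 2^32) is added back before subtracting. A standalone sketch of the 48-bit case:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Delta of a free-running 48-bit counter against a stored offset,
 * tolerating one wraparound, as i40e_stat_update48 does. */
static uint64_t stat_delta48(uint64_t new_data, uint64_t offset)
{
	uint64_t stat;

	if (new_data >= offset)
		stat = new_data - offset;
	else
		stat = (new_data + BIT_ULL(48)) - offset;	/* wrapped */
	return stat & 0xFFFFFFFFFFFFULL;	/* keep 48 bits */
}

int main(void)
{
	/* counter wrapped from 2^48 - 3 up through zero to 5: delta is 8 */
	printf("%llu\n",
	       (unsigned long long)stat_delta48(5, 0xFFFFFFFFFFFFULL - 2));
	return 0;
}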
@@ -621,11 +671,15 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
struct i40e_hw *hw = &pf->hw;
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- int idx = 0;
+ struct i40e_veb_tc_stats *veb_oes;
+ struct i40e_veb_tc_stats *veb_es;
+ int i, idx = 0;
idx = veb->stats_idx;
es = &veb->stats;
oes = &veb->stats_offsets;
+ veb_es = &veb->tc_stats;
+ veb_oes = &veb->tc_stats_offsets;
/* Gather up the stats that the hw collects */
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
@@ -661,6 +715,28 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
veb->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
+ I40E_GLVEBTC_RPCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_rx_packets[i],
+ &veb_es->tc_rx_packets[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
+ I40E_GLVEBTC_RBCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_rx_bytes[i],
+ &veb_es->tc_rx_bytes[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
+ I40E_GLVEBTC_TPCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_tx_packets[i],
+ &veb_es->tc_tx_packets[i]);
+ i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
+ I40E_GLVEBTC_TBCL(i, idx),
+ veb->stat_offsets_loaded,
+ &veb_oes->tc_tx_bytes[i],
+ &veb_es->tc_tx_bytes[i]);
+ }
veb->stat_offsets_loaded = true;
}
@@ -725,7 +801,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
struct i40e_hw_port_stats *nsd = &pf->stats;
struct i40e_hw *hw = &pf->hw;
u64 xoff = 0;
- u16 i, v;
if ((hw->fc.current_mode != I40E_FC_FULL) &&
(hw->fc.current_mode != I40E_FC_RX_PAUSE))
@@ -740,18 +815,6 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
if (!(nsd->link_xoff_rx - xoff))
return;
- /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
- if (!vsi || !vsi->tx_rings[0])
- continue;
-
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = vsi->tx_rings[i];
- clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
- }
- }
}
/**
@@ -767,7 +830,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
struct i40e_dcbx_config *dcb_cfg;
struct i40e_hw *hw = &pf->hw;
- u16 i, v;
+ u16 i;
u8 tc;
dcb_cfg = &hw->local_dcbx_config;
@@ -780,6 +843,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
u64 prio_xoff = nsd->priority_xoff_rx[i];
+
i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
pf->stat_offsets_loaded,
&osd->priority_xoff_rx[i],
@@ -792,23 +856,6 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
tc = dcb_cfg->etscfg.prioritytable[i];
xoff[tc] = true;
}
-
- /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
- if (!vsi || !vsi->tx_rings[0])
- continue;
-
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = vsi->tx_rings[i];
-
- tc = ring->dcb_tc;
- if (xoff[tc])
- clear_bit(__I40E_HANG_CHECK_ARMED,
- &ring->state);
- }
- }
}
/**
@@ -833,6 +880,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
+ u64 tx_linearize;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -851,7 +899,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
*/
rx_b = rx_p = 0;
tx_b = tx_p = 0;
- tx_restart = tx_busy = 0;
+ tx_restart = tx_busy = tx_linearize = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -868,6 +916,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
+ tx_linearize += p->tx_stats.tx_linearize;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -884,6 +933,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rcu_read_unlock();
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
+ vsi->tx_linearize = tx_linearize;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
@@ -1123,6 +1173,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->rx_lpi_count, &nsd->rx_lpi_count);
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+ !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ nsd->fd_sb_status = true;
+ else
+ nsd->fd_sb_status = false;
+
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+ !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ nsd->fd_atr_status = true;
+ else
+ nsd->fd_atr_status = false;
+
pf->stat_offsets_loaded = true;
}
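
The two new if/else blocks above each derive one boolean: the feature is reported as active only if its flag is set and it has not been auto-disabled. A standalone sketch of that flag test (the flag constants here are illustrative stand-ins, not the driver's values):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_FD_SB	(1u << 0)	/* illustrative values */
#define FLAG_FD_ATR	(1u << 1)

static bool feature_status(unsigned int flags, unsigned int auto_disable,
			   unsigned int feature)
{
	return (flags & feature) && !(auto_disable & feature);
}

int main(void)
{
	unsigned int flags = FLAG_FD_SB | FLAG_FD_ATR;
	unsigned int auto_disable = FLAG_FD_ATR;	/* ATR auto-disabled */

	printf("sb=%d atr=%d\n",
	       feature_status(flags, auto_disable, FLAG_FD_SB),
	       feature_status(flags, auto_disable, FLAG_FD_ATR));
	return 0;	/* prints sb=1 atr=0 */
}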
@@ -1215,7 +1277,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
* so we have to go through all the list in order to make sure
*/
list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (f->vlan >= 0)
+ if (f->vlan >= 0 || vsi->info.pvid)
return true;
}
@@ -1240,6 +1302,8 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
struct i40e_mac_filter *f;
list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (vsi->info.pvid)
+ f->vlan = le16_to_cpu(vsi->info.pvid);
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
@@ -1264,7 +1328,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
- i40e_status aq_ret;
+ i40e_status ret;
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
@@ -1275,8 +1339,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- if (aq_ret)
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ if (ret)
return -ENOENT;
return 0;
@@ -1291,6 +1355,9 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
**/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1349,6 +1416,9 @@ add_filter_out:
* @vlan: the vlan
* @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
**/
void i40e_del_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1376,6 +1446,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
} else {
/* make sure we don't remove a filter in use by VF or netdev */
int min_f = 0;
+
min_f += (f->is_vf ? 1 : 0);
min_f += (f->is_netdev ? 1 : 0);
@@ -1434,6 +1505,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
+
ret = i40e_aq_mac_address_write(&vsi->back->hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
@@ -1453,8 +1525,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
@@ -1465,13 +1539,15 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
} else {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
false, false);
if (f)
f->is_laa = true;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
- i40e_sync_vsi_filters(vsi);
+ i40e_sync_vsi_filters(vsi, false);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
@@ -1514,7 +1590,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
/* Find numtc from enabled TC bitmap */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i)) /* TC is enabled */
+ if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
numtc++;
}
if (!numtc) {
@@ -1533,14 +1609,18 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* vectors available and so we need to lower the used
* q count.
*/
- qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ else
+ qcount = vsi->alloc_queue_pairs;
num_tc_qps = qcount / numtc;
- num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
+ num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* See if the given TC is enabled for the given VSI */
- if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+ if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+ /* TC is enabled */
int pow, num_qps;
switch (vsi->type) {
@@ -1566,7 +1646,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
/* find the next higher power-of-2 of num queue pairs */
num_qps = qcount;
pow = 0;
- while (num_qps && ((1 << pow) < qcount)) {
+ while (num_qps && (BIT_ULL(pow) < qcount)) {
pow++;
num_qps >>= 1;
}
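
The loop above computes the smallest power of two that covers qcount queue pairs (the BIT_ULL conversion only widens the comparison to 64 bits). A standalone version of the same loop:

#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Smallest exponent with BIT_ULL(pow) >= qcount, using the exact loop
 * shape of the queue-map setup above; the num_qps shift acts as a
 * secondary termination guard. */
static int next_pow2_exp(unsigned long long qcount)
{
	unsigned long long num_qps = qcount;
	int pow = 0;

	while (num_qps && (BIT_ULL(pow) < qcount)) {
		pow++;
		num_qps >>= 1;
	}
	return pow;
}

int main(void)
{
	printf("%d\n", next_pow2_exp(6));	/* prints 3: 2^3 = 8 >= 6 */
	return 0;
}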
@@ -1596,7 +1676,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
- else
+ else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
}
@@ -1637,6 +1717,8 @@ static void i40e_set_rx_mode(struct net_device *netdev)
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
if (!i40e_find_mac(vsi, uca->addr, false, true)) {
@@ -1662,37 +1744,29 @@ static void i40e_set_rx_mode(struct net_device *netdev)
/* remove filter if not in netdev list */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- bool found = false;
if (!f->is_netdev)
continue;
- if (is_multicast_ether_addr(f->macaddr)) {
- netdev_for_each_mc_addr(mca, netdev) {
- if (ether_addr_equal(mca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- } else {
- netdev_for_each_uc_addr(uca, netdev) {
- if (ether_addr_equal(uca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
+ netdev_for_each_mc_addr(mca, netdev)
+ if (ether_addr_equal(mca->addr, f->macaddr))
+ goto bottom_of_search_loop;
- for_each_dev_addr(netdev, ha) {
- if (ether_addr_equal(ha->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- }
- if (!found)
- i40e_del_filter(
- vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+ netdev_for_each_uc_addr(uca, netdev)
+ if (ether_addr_equal(uca->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ for_each_dev_addr(netdev, ha)
+ if (ether_addr_equal(ha->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+
+bottom_of_search_loop:
+ continue;
}
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
@@ -1702,24 +1776,101 @@ static void i40e_set_rx_mode(struct net_device *netdev)
}
/**
+ * i40e_mac_filter_entry_clone - Clones a MAC filter entry
+ * @src: source MAC filter entry to be cloned
+ *
+ * Returns the pointer to newly cloned MAC filter entry or NULL
+ * in case of error
+ **/
+static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
+ struct i40e_mac_filter *src)
+{
+ struct i40e_mac_filter *f;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+ *f = *src;
+
+ INIT_LIST_HEAD(&f->list);
+
+ return f;
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ * those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated to be removed from the device.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+ struct list_head *from)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, from, list) {
+ f->changed = true;
+ /* Move the element back into the MAC filter list */
+ list_move_tail(&f->list, &vsi->mac_filter_list);
+ }
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ *
+ * MAC filter entries in the VSI's list were slated to be added to the device.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed && f->counter)
+ f->changed = true;
+ }
+}
+
+/**
+ * i40e_cleanup_add_list - Delete the elements from the add list and release
+ * their memory
+ * @add_list: Pointer to list which contains MAC filter entries
+ **/
+static void i40e_cleanup_add_list(struct list_head *add_list)
+{
+ struct i40e_mac_filter *f, *ftmp;
+
+ list_for_each_entry_safe(f, ftmp, add_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+}
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
+ * @grab_rtnl: whether RTNL needs to be grabbed
*
* Push any outstanding VSI filter changes through the AdminQ.
*
* Returns 0 or error value
**/
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
{
- struct i40e_mac_filter *f, *ftmp;
+ struct list_head tmp_del_list, tmp_add_list;
+ struct i40e_mac_filter *f, *ftmp, *fclone;
bool promisc_forced_on = false;
bool add_happened = false;
int filter_list_len = 0;
u32 changed_flags = 0;
- i40e_status aq_ret = 0;
+ bool err_cond = false;
+ i40e_status ret = 0;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
+ int aq_err = 0;
u16 cmd_flags;
/* empty array typed pointers, kcalloc later */
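
The clone/undo helpers above support the locking pattern this hunk introduces: under the spinlock, changed entries are only moved (deletes) or cloned (adds) onto temporary lists; the slow AdminQ calls then run with the lock dropped, and the undo helpers roll everything back if allocation fails part-way. A minimal userspace sketch of the splice-under-lock step (the singly linked list and pthread mutex are illustrative, not the driver's types):

#include <pthread.h>
#include <stdio.h>

struct filter {
	struct filter *next;
	int changed;
	int id;
};

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach every 'changed' entry onto a private list while holding the
 * lock; the caller then processes them with the lock dropped. */
static struct filter *splice_changed(struct filter **list)
{
	struct filter *del_list = NULL, **pp;

	pthread_mutex_lock(&filter_lock);
	for (pp = list; *pp; ) {
		struct filter *f = *pp;

		if (f->changed) {
			*pp = f->next;		/* unlink from live list */
			f->next = del_list;	/* push onto temp list */
			del_list = f;
		} else {
			pp = &f->next;
		}
	}
	pthread_mutex_unlock(&filter_lock);
	return del_list;
}

int main(void)
{
	struct filter c = { NULL, 1, 3 }, b = { &c, 0, 2 }, a = { &b, 1, 1 };
	struct filter *head = &a, *del;

	for (del = splice_changed(&head); del; del = del->next)
		printf("deleting filter %d\n", del->id);	/* 3, then 1 */
	return 0;
}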
@@ -1735,17 +1886,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi->current_netdev_flags = vsi->netdev->flags;
}
+ INIT_LIST_HEAD(&tmp_del_list);
+ INIT_LIST_HEAD(&tmp_add_list);
+
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
- filter_list_len = pf->hw.aq.asq_buf_size /
- sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kcalloc(filter_list_len,
- sizeof(struct i40e_aqc_remove_macvlan_element_data),
- GFP_KERNEL);
- if (!del_list)
- return -ENOMEM;
-
+ spin_lock_bh(&vsi->mac_filter_list_lock);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
if (!f->changed)
continue;
@@ -1753,6 +1900,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (f->counter != 0)
continue;
f->changed = false;
+
+ /* Move the element into temporary del_list */
+ list_move_tail(&f->list, &tmp_del_list);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter == 0)
+ continue;
+ f->changed = false;
+
+ /* Clone MAC filter entry and add into temporary list */
+ fclone = i40e_mac_filter_entry_clone(f);
+ if (!fclone) {
+ err_cond = true;
+ break;
+ }
+ list_add_tail(&fclone->list, &tmp_add_list);
+ }
+
+ /* if failed to clone MAC filter entry - undo */
+ if (err_cond) {
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi);
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ if (err_cond)
+ i40e_cleanup_add_list(&tmp_add_list);
+ }
+
+ /* Now process 'del_list' outside the lock */
+ if (!list_empty(&tmp_del_list)) {
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_remove_macvlan_element_data),
+ GFP_KERNEL);
+ if (!del_list) {
+ i40e_cleanup_add_list(&tmp_add_list);
+
+ /* Undo VSI's MAC filter entry element updates */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+ i40e_undo_add_filter_entries(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
cmd_flags = 0;
/* add to delete list */
@@ -1765,41 +1964,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list[num_del].flags = cmd_flags;
num_del++;
- /* unlink from filter list */
- list_del(&f->list);
- kfree(f);
-
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid, del_list, num_del,
- NULL);
+ ret = i40e_aq_remove_macvlan(&pf->hw,
+ vsi->seid, del_list, num_del,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret &&
- pf->hw.aq.asq_last_status !=
- I40E_AQ_RC_ENOENT)
- dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
- aq_ret,
- pf->hw.aq.asq_last_status);
+ if (ret && aq_err != I40E_AQ_RC_ENOENT)
+ dev_err(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
+ /* Release memory for MAC filter entries which were
+ * synced up with HW.
+ */
+ list_del(&f->list);
+ kfree(f);
}
+
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
del_list, num_del, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_del = 0;
- if (aq_ret &&
- pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+ if (ret && aq_err != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "ignoring delete macvlan error, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
}
kfree(del_list);
del_list = NULL;
+ }
+
+ if (!list_empty(&tmp_add_list)) {
/* do all the adds now */
filter_list_len = pf->hw.aq.asq_buf_size /
@@ -1807,16 +2011,19 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list = kcalloc(filter_list_len,
sizeof(struct i40e_aqc_add_macvlan_element_data),
GFP_KERNEL);
- if (!add_list)
+ if (!add_list) {
+ /* Purge elements from the temporary add list */
+ i40e_cleanup_add_list(&tmp_add_list);
+
+ /* Undo add filter entries from VSI MAC filter list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_undo_add_filter_entries(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
+ }
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
+ list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
- if (f->counter == 0)
- continue;
- f->changed = false;
add_happened = true;
cmd_flags = 0;
@@ -1833,29 +2040,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add,
- NULL);
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
- if (aq_ret)
+ if (ret)
break;
memset(add_list, 0, sizeof(*add_list));
}
+ /* Entries in tmp_add_list were cloned from the MAC
+ * filter list, so free those clones here
+ */
+ list_del(&f->list);
+ kfree(f);
}
+
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
- add_list, num_add, NULL);
+ ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
num_add = 0;
}
kfree(add_list);
add_list = NULL;
- if (add_happened && aq_ret &&
- pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+ if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
dev_info(&pf->pdev->dev,
- "add filter failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "add filter failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, aq_err));
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
!test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) {
@@ -1870,35 +2085,67 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
+
cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_multipromisc,
- NULL);
- if (aq_ret)
+ ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (ret)
dev_info(&pf->pdev->dev,
- "set multi promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set multi promisc failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
bool cur_promisc;
+
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state));
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret)
- dev_info(&pf->pdev->dev,
- "set uni promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret)
+ if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+ /* set defport ON for Main VSI instead of true promisc
+ * this way we will get all unicast/multicast and VLAN
+ * promisc behavior but will not get VF or VMDq traffic
+ * replicated on the Main VSI.
+ */
+ if (pf->cur_promisc != cur_promisc) {
+ pf->cur_promisc = cur_promisc;
+ if (grab_rtnl)
+ i40e_do_reset_safe(pf,
+ BIT(__I40E_PF_RESET_REQUESTED));
+ else
+ i40e_do_reset(pf,
+ BIT(__I40E_PF_RESET_REQUESTED));
+ }
+ } else {
+ ret = i40e_aq_set_vsi_unicast_promiscuous(
+ &vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ ret = i40e_aq_set_vsi_multicast_promiscuous(
+ &vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+ ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (ret)
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1920,7 +2167,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
(pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
- i40e_sync_vsi_filters(pf->vsi[v]);
+ i40e_sync_vsi_filters(pf->vsi[v], true);
}
}
@@ -1994,8 +2241,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vlan stripping failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
}
}
@@ -2023,8 +2272,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vlan stripping failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
}
}
@@ -2057,6 +2308,9 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
+ /* Lock once because all the functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
if (is_netdev) {
add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
is_vf, is_netdev);
@@ -2064,6 +2318,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, vsi->netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2074,6 +2329,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n",
vid, f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2095,6 +2351,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n",
vsi->netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2103,27 +2360,33 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
- if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev)) {
- i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
- add_f = i40e_add_filter(vsi, f->macaddr,
- 0, is_vf, is_netdev);
- if (!add_f) {
- dev_info(&vsi->back->pdev->dev,
- "Could not add filter 0 for %pM\n",
- f->macaddr);
- return -ENOMEM;
- }
+ if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev))
+ continue;
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr,
+ 0, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ return -ENOMEM;
}
}
}
+ /* Make sure to release the lock before calling i40e_sync_vsi_filters
+ * because that function will lock/unlock as necessary
+ */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
- return i40e_sync_vsi_filters(vsi);
+ return i40e_sync_vsi_filters(vsi, false);
}
/**
@@ -2143,6 +2406,9 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
+ /* Lock once because all the functions invoked below iterate the list */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
if (is_netdev)
i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
@@ -2173,6 +2439,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, netdev->dev_addr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
@@ -2181,21 +2448,27 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list) {
i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
- is_vf, is_netdev);
+ is_vf, is_netdev);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, f->macaddr);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
}
+ /* Make sure to release the lock before calling i40e_sync_vsi_filters
+ * because that function will lock/unlock as necessary
+ */
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
- return i40e_sync_vsi_filters(vsi);
+ return i40e_sync_vsi_filters(vsi, false);
}
/**
@@ -2294,7 +2567,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status aq_ret;
+ i40e_status ret;
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -2304,11 +2577,13 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ctxt.seid = vsi->seid;
ctxt.info = vsi->info;
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: update vsi failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "add pvid failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
return -ENOENT;
}
@@ -2527,8 +2802,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
- clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
-
/* cache tail off for easier writes later */
ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
@@ -2590,7 +2863,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
- rx_ctx.showiv = 1;
+ /* this controls whether VLAN is stripped from inner headers */
+ rx_ctx.showiv = 0;
#ifdef I40E_FCOE
rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
@@ -2696,9 +2970,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
#endif /* I40E_FCOE */
/* round up for the chip's needs */
vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
- (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
- (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2728,7 +3002,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
}
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
- if (!(vsi->tc_config.enabled_tc & (1 << n)))
+ if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
continue;
qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -2799,11 +3073,9 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_q_vector *q_vector;
struct i40e_hw *hw = &pf->hw;
u16 vector;
int i, q;
- u32 val;
u32 qp;
/* The interrupt indexing is offset by 1 in the PFINT_ITRn
@@ -2813,7 +3085,9 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
qp = vsi->base_queue;
vector = vsi->base_vector;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- q_vector = vsi->q_vectors[i];
+ struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2822,10 +3096,14 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->tx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr);
+ wr32(hw, I40E_PFINT_RATEN(vector - 1),
+ INTRL_USEC_TO_REG(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
+ u32 val;
+
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
@@ -2877,6 +3155,9 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+
if (pf->flags & I40E_FLAG_PTP)
val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
@@ -2902,6 +3183,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
u32 val;
/* set the ITR configuration */
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
@@ -2960,24 +3242,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
}
/**
- * i40e_irq_dynamic_enable - Enable default interrupt generation settings
- * @vsi: pointer to a vsi
- * @vector: enable a particular Hw Interrupt vector
- **/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
-{
- struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- u32 val;
-
- val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- /* skip the flush */
-}
-
-/**
* i40e_irq_dynamic_disable - Disable default interrupt generation settings
* @vsi: pointer to a vsi
* @vector: disable a particular Hw Interrupt vector
@@ -3005,7 +3269,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -3050,8 +3314,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
q_vector);
if (err) {
dev_info(&pf->pdev->dev,
- "%s: request_irq failed, error: %d\n",
- __func__, err);
+ "MSIX request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* assign the mask for this irq */
@@ -3116,8 +3379,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
int i;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- for (i = vsi->base_vector;
- i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
i40e_irq_dynamic_enable_icr0(pf);
@@ -3167,11 +3429,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0 & I40E_PFINT_ICR0_SWINT_MASK))
pf->sw_int_count++;
+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+ icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+ dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
+ }
+
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* temporarily disable queue cause for NAPI processing */
u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_RQCTL(0), qval);
@@ -3180,7 +3452,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
wr32(hw, I40E_QINT_TQCTL(0), qval);
if (!test_bit(__I40E_DOWN, &pf->state))
- napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
+ napi_schedule_irqoff(&q_vector->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -3341,10 +3613,9 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
- i40e_irq_dynamic_enable(vsi,
- tx_ring->q_vector->v_idx + vsi->base_vector);
- }
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
+
return budget > 0;
}
@@ -3373,7 +3644,7 @@ static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
* @v_idx: vector index
* @qp_idx: queue pair index
**/
-static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
+static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
@@ -3427,7 +3698,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
q_vector->tx.ring = NULL;
while (num_ringpairs--) {
- map_vector_to_qp(vsi, v_start, qp_idx);
+ i40e_map_vector_to_qp(vsi, v_start, qp_idx);
qp_idx++;
qp_remaining--;
}
@@ -3482,14 +3753,12 @@ static void i40e_netpoll(struct net_device *netdev)
if (test_bit(__I40E_DOWN, &vsi->state))
return;
- pf->flags |= I40E_FLAG_IN_NETPOLL;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
i40e_intr(pf->pdev->irq, netdev);
}
- pf->flags &= ~I40E_FLAG_IN_NETPOLL;
}
#endif
@@ -3570,9 +3839,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
ret = i40e_pf_txq_wait(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
- "%s: VSI seid %d Tx ring %d %sable timeout\n",
- __func__, vsi->seid, pf_q,
- (enable ? "en" : "dis"));
+ "VSI seid %d Tx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
break;
}
}
@@ -3648,9 +3916,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
ret = i40e_pf_rxq_wait(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
- "%s: VSI seid %d Rx ring %d %sable timeout\n",
- __func__, vsi->seid, pf_q,
- (enable ? "en" : "dis"));
+ "VSI seid %d Rx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
break;
}
}
@@ -3929,6 +4196,7 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
+ vsi->current_netdev_flags = 0;
}
/**
@@ -3944,17 +4212,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
vsi->type == I40E_VSI_FCOE) {
dev_dbg(&vsi->back->pdev->dev,
- "%s: VSI seid %d skipping FCoE VSI disable\n",
- __func__, vsi->seid);
+ "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
return;
}
set_bit(__I40E_NEEDS_RESTART, &vsi->state);
- if (vsi->netdev && netif_running(vsi->netdev)) {
+ if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- } else {
+ else
i40e_vsi_close(vsi);
- }
}
/**
@@ -4019,8 +4285,8 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
ret = i40e_pf_txq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "%s: VSI seid %d Tx ring %d disable timeout\n",
- __func__, vsi->seid, pf_q);
+ "VSI seid %d Tx ring %d disable timeout\n",
+ vsi->seid, pf_q);
return ret;
}
}
@@ -4052,6 +4318,108 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
}
#endif
+
+/**
+ * i40e_detect_recover_hung_queue - Function to detect and recover a hung queue
+ * @q_idx: TX queue number
+ * @vsi: Pointer to VSI struct
+ *
+ * This function checks the specified queue of the given VSI for a hung
+ * condition. Detection is a two-step process: this pass sets the 'hung' bit;
+ * if napi_poll runs before the next service-task pass, it resets the bit for
+ * the respective q_vector. If not, the hung condition remains unchanged, and
+ * on the subsequent run this function issues a SW interrupt to recover.
+static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
+{
+ struct i40e_ring *tx_ring = NULL;
+ struct i40e_pf *pf;
+ u32 head, val, tx_pending;
+ int i;
+
+ pf = vsi->back;
+
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (q_idx == vsi->tx_rings[i]->queue_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+
+ if (!tx_ring)
+ return;
+
+ /* Read interrupt register */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ val = rd32(&pf->hw,
+ I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->base_vector - 1));
+ else
+ val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
+
+ head = i40e_get_head(tx_ring);
+
+ tx_pending = i40e_get_tx_pending(tx_ring);
+
+ /* If interrupts are disabled and TX pending is non-zero, trigger
+ * the SW interrupt right away (don't wait). Worst case there is
+ * one extra interrupt that finds nothing left to clean because
+ * the queues were already cleaned.
+ */
+ if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+ i40e_force_wb(vsi, tx_ring->q_vector);
+}
+
+/**
+ * i40e_detect_recover_hung - Function to detect and recover hung queues
+ * @pf: pointer to PF struct
+ *
+ * The LAN VSI has a netdev, and the netdev has TX queues. This function
+ * checks each of those TX queues for a hung condition and triggers
+ * recovery by issuing a SW interrupt.
+ **/
+static void i40e_detect_recover_hung(struct i40e_pf *pf)
+{
+ struct net_device *netdev;
+ struct i40e_vsi *vsi;
+ int i;
+
+ /* Only for LAN VSI */
+ vsi = pf->vsi[pf->lan_vsi];
+
+ if (!vsi)
+ return;
+
+ /* Make sure VSI state is not DOWN/RECOVERY_PENDING */
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return;
+
+ /* Make sure type is MAIN VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ netdev = vsi->netdev;
+ if (!netdev)
+ return;
+
+ /* Bail out if netif_carrier is not OK */
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ /* Go through the netdev's TX queues */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+
+ q = netdev_get_tx_queue(netdev, i);
+ if (q)
+ i40e_detect_recover_hung_queue(i, vsi);
+ }
+}
+
/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
* @pf: pointer to PF
@@ -4073,7 +4441,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
if (app.selector == I40E_APP_SEL_TCPIP &&
app.protocolid == I40E_APP_PROTOID_ISCSI) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
- enabled_tc |= (1 << tc);
+ enabled_tc |= BIT_ULL(tc);
break;
}
}
@@ -4122,7 +4490,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
u8 i;
for (i = 0; i < num_tc; i++)
- enabled_tc |= 1 << i;
+ enabled_tc |= BIT(i);
return enabled_tc;
}
@@ -4157,7 +4525,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
num_tc++;
}
return num_tc;
@@ -4179,11 +4547,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
/* Find the first enabled TC */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
break;
}
- return 1 << i;
+ return BIT(i);
}
/**
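
The default-TC helper above returns a one-hot map: BIT(i) for the first enabled TC it finds. The same lowest-set-bit value also has the closed form x & -x; a quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int enabled_tc = 0x58;	/* TCs 3, 4 and 6 enabled */

	/* lowest set bit, equivalent to the first-enabled-TC scan above */
	printf("0x%x\n", enabled_tc & -enabled_tc);	/* prints 0x8 */
	return 0;
}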
@@ -4221,26 +4589,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
+ i40e_status ret;
u32 tc_bw_max;
int i;
/* Get the VSI level BW configuration */
- aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
- if (aq_ret) {
+ ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi bw config, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
/* Get the VSI level BW configuration per TC */
- aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
- if (aq_ret) {
+ ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+ NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -4279,16 +4649,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- i40e_status aq_ret;
+ i40e_status ret;
int i;
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
- aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
- NULL);
- if (aq_ret) {
+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+ NULL);
+ if (ret) {
dev_info(&vsi->back->pdev->dev,
"AQ command Config VSI BW allocation per TC failed = %d\n",
vsi->back->hw.aq.asq_last_status);
@@ -4337,7 +4707,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
* will set the numtc for netdev as 2 that will be
* referenced by the netdev layer as TC 0 and 1.
*/
- if (vsi->tc_config.enabled_tc & (1 << i))
+ if (vsi->tc_config.enabled_tc & BIT_ULL(i))
netdev_set_tc_queue(netdev,
vsi->tc_config.tc_info[i].netdev_tc,
vsi->tc_config.tc_info[i].qcount,
@@ -4399,7 +4769,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
bw_share[i] = 1;
}
@@ -4423,8 +4793,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vsi failed, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "Update vsi tc config failed, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
/* update the local VSI info with updated queue map */
@@ -4435,8 +4807,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "Failed updating vsi bw info, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "Failed updating vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
goto out;
}
@@ -4469,7 +4843,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
/* Enable ETS TCs with equal BW Share for now */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & (1 << i))
+ if (enabled_tc & BIT_ULL(i))
bw_data.tc_bw_share_credits[i] = 1;
}
@@ -4477,8 +4851,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "veb bw config failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "VEB bw config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -4486,8 +4861,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed getting veb bw config, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "Failed getting veb bw config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
out:
@@ -4574,8 +4950,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "AQ command Resume Port Tx failed = %d\n",
- pf->hw.aq.asq_last_status);
+ "Resume Port Tx failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
i40e_service_event_schedule(pf);
@@ -4627,8 +5004,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
}
} else {
dev_info(&pf->pdev->dev,
- "AQ Querying DCB configuration failed: aq_err %d\n",
- pf->hw.aq.asq_last_status);
+ "Query for DCB configuration failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
out:
@@ -4641,11 +5019,14 @@ out:
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
*/
-static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
- char speed[SPEED_SIZE] = "Unknown";
- char fc[FC_SIZE] = "RX/TX";
+ char *speed = "Unknown";
+ char *fc = "Unknown";
+ if (vsi->current_isup == isup)
+ return;
+ vsi->current_isup = isup;
if (!isup) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
return;
@@ -4662,19 +5043,19 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
switch (vsi->back->hw.phy.link_info.link_speed) {
case I40E_LINK_SPEED_40GB:
- strlcpy(speed, "40 Gbps", SPEED_SIZE);
+ speed = "40 G";
break;
case I40E_LINK_SPEED_20GB:
- strncpy(speed, "20 Gbps", SPEED_SIZE);
+ speed = "20 G";
break;
case I40E_LINK_SPEED_10GB:
- strlcpy(speed, "10 Gbps", SPEED_SIZE);
+ speed = "10 G";
break;
case I40E_LINK_SPEED_1GB:
- strlcpy(speed, "1000 Mbps", SPEED_SIZE);
+ speed = "1000 M";
break;
case I40E_LINK_SPEED_100MB:
- strncpy(speed, "100 Mbps", SPEED_SIZE);
+ speed = "100 M";
break;
default:
break;
@@ -4682,20 +5063,20 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
switch (vsi->back->hw.fc.current_mode) {
case I40E_FC_FULL:
- strlcpy(fc, "RX/TX", FC_SIZE);
+ fc = "RX/TX";
break;
case I40E_FC_TX_PAUSE:
- strlcpy(fc, "TX", FC_SIZE);
+ fc = "TX";
break;
case I40E_FC_RX_PAUSE:
- strlcpy(fc, "RX", FC_SIZE);
+ fc = "RX";
break;
default:
- strlcpy(fc, "None", FC_SIZE);
+ fc = "None";
break;
}
- netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+ netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
speed, fc);
}
@@ -4859,7 +5240,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
/* Generate TC map for number of tc requested */
for (i = 0; i < tc; i++)
- enabled_tc |= (1 << i);
+ enabled_tc |= BIT_ULL(i);
/* Requesting same TC configuration as already enabled */
if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -4998,7 +5379,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
return err;
}
@@ -5066,7 +5447,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
i40e_vc_notify_reset(pf);
/* do the biggest reset indicated */
- if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
/* Request a Global Reset
*
@@ -5081,7 +5462,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
- } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
/* Request a Core Reset
*
@@ -5093,7 +5474,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
- } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
*
@@ -5106,7 +5487,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf);
- } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
/* Find the VSI(s) that requested a re-init */
@@ -5114,22 +5495,21 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
"VSI reinit requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
+
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
i40e_vsi_reinit_locked(pf->vsi[v]);
clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
}
}
-
- /* no further action needed, so return now */
- return;
- } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+ } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
/* Find the VSI(s) that needs to be brought down */
dev_info(&pf->pdev->dev, "VSI down requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
+
if (vsi != NULL &&
test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
set_bit(__I40E_DOWN, &vsi->state);
@@ -5137,13 +5517,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}
}
-
- /* no further action needed, so return now */
- return;
} else {
dev_info(&pf->pdev->dev,
"bad reset request 0x%08x\n", reset_flags);
- return;
}
}
@@ -5199,8 +5575,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
- dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
- need_reconfig);
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
return need_reconfig;
}
@@ -5227,16 +5602,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- dev_dbg(&pf->pdev->dev,
- "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
+ dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
return ret;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
dev_dbg(&pf->pdev->dev,
- "%s: LLDP event mib type %s\n", __func__,
- type ? "remote" : "local");
+ "LLDP event mib type %s\n", type ? "remote" : "local");
if (type == I40E_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@ -5253,7 +5626,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+ dev_info(&pf->pdev->dev,
+ "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto exit;
}
@@ -5418,7 +5794,9 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
**/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
+ struct i40e_fdir_filter *filter;
u32 fcnt_prog, fcnt_avail;
+ struct hlist_node *node;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
return;
@@ -5447,6 +5825,18 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
+
+ /* if hw had a problem adding a filter, delete it */
+ if (pf->fd_inv > 0) {
+ hlist_for_each_entry_safe(filter, node,
+ &pf->fdir_filter_list, fdir_node) {
+ if (filter->fd_id == pf->fd_inv) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ pf->fdir_pf_active_filters--;
+ }
+ }
+ }
}
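
The new invalid-filter cleanup walks pf->fdir_filter_list with hlist_for_each_entry_safe(), which caches the next node so the current entry can be unlinked and freed mid-walk. A hedged userspace analogue of that delete-while-iterating idea, with illustrative types in place of the driver's hlist:

	#include <stdlib.h>

	/* Illustrative type; the driver uses struct i40e_fdir_filter on an
	 * hlist, but the save-the-link-before-freeing idea is the same.
	 */
	struct filter {
		int fd_id;
		struct filter *next;
	};

	static void delete_matching(struct filter **head, int bad_id)
	{
		struct filter **pp = head;

		while (*pp) {
			struct filter *cur = *pp;

			if (cur->fd_id == bad_id) {
				*pp = cur->next;	/* unlink before freeing */
				free(cur);
			} else {
				pp = &cur->next;
			}
		}
	}

	int main(void)
	{
		struct filter *head = NULL;

		for (int id = 0; id < 3; id++) {
			struct filter *f = malloc(sizeof(*f));

			f->fd_id = id;
			f->next = head;
			head = f;
		}
		delete_matching(&head, 1);	/* drops the node with fd_id == 1 */
		while (head)
			delete_matching(&head, head->fd_id);
		return 0;
	}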
#define I40E_MIN_FD_FLUSH_INTERVAL 10
@@ -5466,49 +5856,51 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
- if (time_after(jiffies, pf->fd_flush_timestamp +
- (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
- /* If the flush is happening too quick and we have mostly
- * SB rules we should not re-enable ATR for some time.
- */
- min_flush_time = pf->fd_flush_timestamp
- + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
- fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+ if (!time_after(jiffies, pf->fd_flush_timestamp +
+ (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+ return;
- if (!(time_after(jiffies, min_flush_time)) &&
- (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
- disable_atr = true;
- }
+ /* If the flush is happening too quick and we have mostly SB rules we
+ * should not re-enable ATR for some time.
+ */
+ min_flush_time = pf->fd_flush_timestamp +
+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+ fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
- pf->fd_flush_timestamp = jiffies;
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- /* flush all filters */
- wr32(&pf->hw, I40E_PFQF_CTL_1,
- I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
- i40e_flush(&pf->hw);
- pf->fd_flush_cnt++;
- pf->fd_add_err = 0;
- do {
- /* Check FD flush status every 5-6msec */
- usleep_range(5000, 6000);
- reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
- if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
- break;
- } while (flush_wait_retry--);
- if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
- dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
- } else {
- /* replay sideband filters */
- i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
- if (!disable_atr)
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
- }
+ if (!(time_after(jiffies, min_flush_time)) &&
+ (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ disable_atr = true;
}
+
+ pf->fd_flush_timestamp = jiffies;
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ /* flush all filters */
+ wr32(&pf->hw, I40E_PFQF_CTL_1,
+ I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+ i40e_flush(&pf->hw);
+ pf->fd_flush_cnt++;
+ pf->fd_add_err = 0;
+ do {
+ /* Check FD flush status every 5-6msec */
+ usleep_range(5000, 6000);
+ reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+ if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+ break;
+ } while (flush_wait_retry--);
+ if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+ dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+ } else {
+ /* replay sideband filters */
+ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+ if (!disable_atr)
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+ }
}
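
The refactor flattens this function into an early-return guard built on time_after(), which remains correct when the jiffies counter wraps. A small sketch of the wrap-safe comparison it relies on; this mirrors the macro's idea in plain C and is not the kernel implementation:

	#include <stdbool.h>
	#include <stdio.h>

	/* Unsigned subtraction reinterpreted as signed stays correct
	 * across a counter wrap.
	 */
	static bool time_after_u32(unsigned int a, unsigned int b)
	{
		return (int)(b - a) < 0;	/* true if a is later than b */
	}

	int main(void)
	{
		unsigned int just_before_wrap = 0xfffffff0u;
		unsigned int just_after_wrap = 0x00000010u;

		printf("%d\n", time_after_u32(just_after_wrap, just_before_wrap));
		return 0;
	}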
/**
@@ -5616,15 +6008,23 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
**/
static void i40e_link_event(struct i40e_pf *pf)
{
- bool new_link, old_link;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 new_link_speed, old_link_speed;
+ i40e_status status;
+ bool new_link, old_link;
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
- new_link = i40e_get_link_status(&pf->hw);
+
+ status = i40e_get_link_status(&pf->hw, &new_link);
+ if (status) {
+ dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+ status);
+ return;
+ }
+
old_link_speed = pf->hw.phy.link_info_old.link_speed;
new_link_speed = pf->hw.phy.link_info.link_speed;
@@ -5653,68 +6053,6 @@ static void i40e_link_event(struct i40e_pf *pf)
}
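
The updated i40e_get_link_status() call returns a status code and reports the link state through an out parameter, so a failed AdminQ query is logged and skipped instead of being misread as link-down. A hedged sketch of that calling convention, with illustrative names rather than the driver's:

	#include <stdbool.h>

	/* The return value reports whether the query worked; the out
	 * parameter reports the link itself.
	 */
	static int query_link(bool *link_up)
	{
		/* ...ask the firmware; on failure, return nonzero and leave
		 * *link_up untouched so the caller keeps its old state...
		 */
		*link_up = true;
		return 0;
	}

	static void link_event(void)
	{
		bool new_link;

		if (query_link(&new_link))
			return;		/* unknown state: log and retry later */
		/* ...otherwise act on new_link... */
	}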
/**
- * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
- * @pf: board private structure
- *
- * Set the per-queue flags to request a check for stuck queues in the irq
- * clean functions, then force interrupts to be sure the irq clean is called.
- **/
-static void i40e_check_hang_subtask(struct i40e_pf *pf)
-{
- int i, v;
-
- /* If we're down or resetting, just bail */
- if (test_bit(__I40E_DOWN, &pf->state) ||
- test_bit(__I40E_CONFIG_BUSY, &pf->state))
- return;
-
- /* for each VSI/netdev
- * for each Tx queue
- * set the check flag
- * for each q_vector
- * force an interrupt
- */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
- int armed = 0;
-
- if (!pf->vsi[v] ||
- test_bit(__I40E_DOWN, &vsi->state) ||
- (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
- continue;
-
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- set_check_for_tx_hang(vsi->tx_rings[i]);
- if (test_bit(__I40E_HANG_CHECK_ARMED,
- &vsi->tx_rings[i]->state))
- armed++;
- }
-
- if (armed) {
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
- wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
- (I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
- I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
- } else {
- u16 vec = vsi->base_vector - 1;
- u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
- for (i = 0; i < vsi->num_q_vectors; i++, vec++)
- wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(vec), val);
- }
- i40e_flush(&vsi->back->hw);
- }
- }
-}
-
-/**
* i40e_watchdog_subtask - periodic checks not using event driven response
* @pf: board private structure
**/
@@ -5733,8 +6071,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- i40e_check_hang_subtask(pf);
- i40e_link_event(pf);
+ if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+ i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
@@ -5743,10 +6081,12 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
- /* Update the stats for the active switching components */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i])
- i40e_update_veb_stats(pf->veb[i]);
+ if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+ /* Update the stats for the active switching components */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i])
+ i40e_update_veb_stats(pf->veb[i]);
+ }
i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}
@@ -5761,23 +6101,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
}
if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
- reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+ reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
}
@@ -5983,27 +6323,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
- int aq_ret;
+ int ret;
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s couldn't get PF vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vsi switch failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -6017,27 +6359,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
- int aq_ret;
+ int ret;
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s couldn't get PF vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "update vsi switch failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -6053,8 +6397,9 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
struct i40e_pf *pf = veb->pf;
- dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
- veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+ dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
if (veb->bridge_mode & BRIDGE_MODE_VEPA)
i40e_disable_pf_switch_lb(pf);
else
@@ -6097,7 +6442,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
ret = i40e_add_vsi(ctl_vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "rebuild of owner VSI failed: %d\n", ret);
+ "rebuild of veb_idx %d owner VSI failed: %d\n",
+ veb->idx, ret);
goto end_reconstitute;
}
i40e_vsi_reset_stats(ctl_vsi);
@@ -6120,6 +6466,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (pf->vsi[v]->veb_idx == veb->idx) {
struct i40e_vsi *vsi = pf->vsi[v];
+
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
@@ -6176,18 +6523,14 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
dev_info(&pf->pdev->dev,
- "capability discovery failed: aq=%d\n",
- pf->hw.aq.asq_last_status);
+ "capability discovery failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
return -ENODEV;
}
} while (err);
- if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
- (pf->hw.aq.fw_maj_ver < 2)) {
- pf->hw.func_caps.num_msix_vectors++;
- pf->hw.func_caps.num_msix_vectors_vf++;
- }
-
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -6363,7 +6706,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
@@ -6373,11 +6718,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
- if (ret) {
- dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
- ret);
+ if (ret)
goto end_core_reset;
- }
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
@@ -6401,9 +6743,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
- ret = i40e_init_pf_fcoe(pf);
- if (ret)
- dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
+ i40e_init_pf_fcoe(pf);
#endif
/* do basic switch setup */
@@ -6418,12 +6758,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* make sure our flow control settings are restored */
ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
if (ret)
- dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+ dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
* They are still in our local switch element arrays, so only
@@ -6484,13 +6828,24 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
+ /* Add a filter that prevents any VSI from transmitting Flow control
+ * frames. This stops a malicious VF from sending PAUSE or PFC frames
+ * and potentially controlling traffic for other PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
@@ -6647,8 +7002,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_vxlan_bitmap & (1 << i)) {
- pf->pending_vxlan_bitmap &= ~(1 << i);
+ if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+ pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
port = pf->vxlan_ports[i];
if (port)
ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6659,10 +7014,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
if (ret) {
dev_info(&pf->pdev->dev,
- "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+ "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
port ? "add" : "delete",
- ntohs(port), i, ret,
- pf->hw.aq.asq_last_status);
+ ntohs(port), i,
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
pf->vxlan_ports[i] = 0;
}
}
@@ -6687,6 +7044,7 @@ static void i40e_service_task(struct work_struct *work)
return;
}
+ i40e_detect_recover_hung(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
i40e_vc_process_vflr_event(pf);
@@ -6870,6 +7228,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->idx = vsi_idx;
vsi->rx_itr_setting = pf->rx_itr_default;
vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->int_rate_limit = 0;
vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
pf->rss_table_size : 64;
vsi->netdev_registered = false;
@@ -6888,6 +7247,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+ /* Initialize VSI lock */
+ spin_lock_init(&vsi->mac_filter_list_lock);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
goto unlock_pf;
@@ -7013,6 +7374,10 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
+ if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+ if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+ tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
vsi->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
@@ -7411,62 +7776,141 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
}
/**
- * i40e_config_rss - Prepare for RSS if used
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+ struct i40e_aqc_get_set_rss_key_data rss_key;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ bool pf_lut = false;
+ u8 *rss_lut;
+ int ret, i;
+
+ memset(&rss_key, 0, sizeof(rss_key));
+ memcpy(&rss_key, seed, sizeof(rss_key));
+
+ rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
+ if (!rss_lut)
+ return -ENOMEM;
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = 0; i < vsi->rss_table_size; i++)
+ rss_lut[i] = i % vsi->rss_size;
+
+ ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto config_rss_aq_out;
+ }
+
+ if (vsi->type == I40E_VSI_MAIN)
+ pf_lut = true;
+
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
+ vsi->rss_table_size);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+config_rss_aq_out:
+ kfree(rss_lut);
+ return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_pf *pf = vsi->back;
+
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(vsi, seed);
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss_reg - Prepare for RSS if used
* @pf: board private structure
+ * @seed: RSS hash seed
**/
-static int i40e_config_rss(struct i40e_pf *pf)
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
{
- u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
+ u32 *seed_dw = (u32 *)seed;
+ u32 current_queue = 0;
u32 lut = 0;
int i, j;
- u64 hena;
- u32 reg_val;
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ /* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
+ wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+ lut = 0;
+ for (j = 0; j < 4; j++) {
+ if (current_queue == vsi->rss_size)
+ current_queue = 0;
+ lut |= ((current_queue) << (8 * j));
+ current_queue++;
+ }
+ wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+ }
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ u64 hena;
+
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= I40E_DEFAULT_RSS_HENA;
+ hena |= i40e_pf_get_default_rss_hena(pf);
+
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
- /* Check capability and Set table size and register per hw expectation*/
+ /* Determine the RSS table size based on the hardware capabilities */
reg_val = rd32(hw, I40E_PFQF_CTL_0);
- if (pf->rss_table_size == 512)
- reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- else
- reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+ reg_val = (pf->rss_table_size == 512) ?
+ (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+ (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
wr32(hw, I40E_PFQF_CTL_0, reg_val);
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
- /* The assumption is that lan qp count will be the highest
- * qp count for any PF VSI that needs RSS.
- * If multiple VSIs need RSS support, all the qp counts
- * for those VSIs should be a power of 2 for RSS to work.
- * If LAN VSI is the only consumer for RSS then this requirement
- * is not necessary.
- */
- if (j == vsi->rss_size)
- j = 0;
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j &
- ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
- i40e_flush(hw);
-
- return 0;
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
+ else
+ return i40e_config_rss_reg(pf, seed);
}
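
Both new RSS paths stripe queue indices into the lookup table round-robin; the register path additionally packs four 8-bit entries into each 32-bit HLUT word. A standalone sketch of that packing, using made-up table and queue sizes:

	#include <stdio.h>

	int main(void)
	{
		enum { LUT_WORDS = 4, RSS_SIZE = 6 };	/* illustrative sizes */
		unsigned int queue = 0;

		for (int i = 0; i < LUT_WORDS; i++) {
			unsigned int lut = 0;

			for (int j = 0; j < 4; j++) {
				if (queue == RSS_SIZE)
					queue = 0;
				lut |= queue++ << (8 * j);	/* one byte per entry */
			}
			printf("HLUT[%d] = 0x%08x\n", i, lut);
		}
		return 0;
	}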
/**
@@ -7533,7 +7977,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
i40e_status status;
/* Set the valid bit for this PF */
- bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
@@ -7567,8 +8011,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %d: aq_err %d\n",
- ret, last_aq_status);
+ "Cannot acquire NVM for read access, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -7583,8 +8028,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
- ret, last_aq_status);
+ dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -7596,8 +8042,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %d: aq_err %d\n",
- ret, last_aq_status);
+ "Cannot acquire NVM for write access, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
/* Write it back out unchanged to initiate update NVM,
@@ -7615,8 +8062,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %d aq_err %d\n",
- ret, last_aq_status);
+ "BW settings NOT SAVED, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:
return ret;
@@ -7648,6 +8096,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
+ I40E_FLAG_LINK_POLLING_ENABLED |
I40E_FLAG_MSIX_ENABLED;
if (iommu_present(&pci_bus_type))
@@ -7662,7 +8111,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
*/
- pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
pf->rss_size = 1;
pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7673,7 +8122,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
/* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
if (i40e_get_npar_bw_setting(pf))
@@ -7690,12 +8139,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- } else {
+ if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+ pf->hw.num_partitions > 1)
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
- }
+ else
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
pf->fdir_pf_filter_count =
pf->hw.func_caps.fd_filters_guaranteed;
pf->hw.fdir_shared_filter_count =
@@ -7703,15 +8152,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
if (pf->hw.func_caps.vmdq) {
- pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
- pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
#ifdef I40E_FCOE
- err = i40e_init_pf_fcoe(pf);
- if (err)
- dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
+ i40e_init_pf_fcoe(pf);
#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
@@ -7723,10 +8170,21 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
+ if (pf->hw.mac.type == I40E_MAC_X722) {
+ pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
+ I40E_FLAG_128_QP_RSS_CAPABLE |
+ I40E_FLAG_HW_ATR_EVICT_CAPABLE |
+ I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
+ I40E_FLAG_WB_ON_ITR_CAPABLE |
+ I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+ }
pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
+ /* By default FW has this off for performance reasons */
+ pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
/* set up queue assignment tracking */
size = sizeof(struct i40e_lump_tracking)
+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
@@ -7812,7 +8270,7 @@ static int i40e_set_features(struct net_device *netdev,
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
return 0;
}
@@ -7875,10 +8333,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->vxlan_ports[next_idx] = port;
- pf->pending_vxlan_bitmap |= (1 << next_idx);
+ pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
- dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
}
/**
@@ -7906,11 +8362,8 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
* and make it pending
*/
pf->vxlan_ports[idx] = 0;
- pf->pending_vxlan_bitmap |= (1 << idx);
+ pf->pending_vxlan_bitmap |= BIT_ULL(idx);
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
- dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
- ntohs(port));
} else {
netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port));
@@ -7981,7 +8434,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-#ifdef HAVE_BRIDGE_ATTRIBS
/**
* i40e_ndo_bridge_setlink - Set the hardware bridge mode
* @dev: the netdev being configured
@@ -7995,7 +8447,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* bridge mode enabled.
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh,
+ u16 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -8062,18 +8515,15 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
* @seq: RTNL message seq #
* @dev: the netdev being configured
* @filter_mask: unused
+ * @nlflags: netlink flags passed in
*
* Return the mode in which the hardware bridge is operating in
* i.e VEB or VEPA.
**/
-#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask, int nlflags)
-#else
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, int nlflags)
-#endif /* HAVE_BRIDGE_FILTER */
+ u32 __always_unused filter_mask,
+ int nlflags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -8097,7 +8547,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
nlflags, 0, 0, filter_mask, NULL);
}
-#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb->encapsulation &&
+ (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ I40E_MAX_TUNNEL_HDR_LEN))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
+}
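
i40e_features_check() withdraws checksum and GSO offloads for encapsulated frames whose inner MAC header sits too far past the transport header for the hardware to parse. A userspace sketch of the length test, with byte offsets standing in for the skb header accessors:

	#include <stdbool.h>
	#include <stddef.h>

	#define MAX_TUNNEL_HDR_LEN 80	/* mirrors I40E_MAX_TUNNEL_HDR_LEN */

	/* Byte offsets into the frame stand in for skb_inner_mac_header()
	 * and skb_transport_header(); the comparison is the same.
	 */
	static bool keep_hw_offloads(bool encapsulated,
				     size_t inner_mac_off, size_t transport_off)
	{
		if (encapsulated &&
		    inner_mac_off - transport_off > MAX_TUNNEL_HDR_LEN)
			return false;	/* stack falls back to software csum/GSO */
		return true;
	}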
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
@@ -8133,10 +8601,9 @@ static const struct net_device_ops i40e_netdev_ops = {
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
-#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
};
/**
@@ -8166,6 +8633,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
NETIF_F_TSO;
netdev->features = NETIF_F_SG |
@@ -8173,6 +8641,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_SCTP_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -8198,17 +8667,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
- if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+ if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr,
I40E_VLAN_ANY, false, true);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ }
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr);
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -8264,12 +8742,22 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
return 1;
veb = pf->veb[vsi->veb_idx];
+ if (!veb) {
+ dev_info(&pf->pdev->dev,
+ "There is no veb associated with the bridge\n");
+ return -ENOENT;
+ }
+
/* Uplink is a bridge in VEPA mode */
- if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+ if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
return 0;
+ } else {
+ /* Uplink is a bridge in VEB mode */
+ return 1;
+ }
- /* Uplink is a bridge in VEB mode */
- return 1;
}
/**
@@ -8282,10 +8770,13 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
- struct i40e_mac_filter *f, *ftmp;
+ u8 laa_macaddr[ETH_ALEN];
+ bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
+ struct i40e_mac_filter *f, *ftmp;
+
u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0;
@@ -8304,8 +8795,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
return -ENOENT;
}
vsi->info = ctxt.info;
@@ -8327,8 +8820,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ "update vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -8345,9 +8840,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_config_tc(vsi, enabled_tc);
if (ret) {
dev_info(&pf->pdev->dev,
- "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
- enabled_tc, ret,
- pf->hw.aq.asq_last_status);
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+ enabled_tc,
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
}
}
@@ -8438,8 +8935,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add vsi failed, aq_err=%d\n",
- vsi->back->hw.aq.asq_last_status);
+ "add vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
ret = -ENOENT;
goto err;
}
@@ -8449,32 +8948,41 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->id = ctxt.vsi_number;
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
f->changed = true;
f_count++;
+ /* Expected to have only one MAC filter entry for LAA in list */
if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
- struct i40e_aqc_remove_macvlan_element_data element;
+ ether_addr_copy(laa_macaddr, f->macaddr);
+ found_laa_mac_filter = true;
+ }
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, f->macaddr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- if (ret) {
- /* some older FW has a different default */
- element.flags |=
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- }
+ if (found_laa_mac_filter) {
+ struct i40e_aqc_remove_macvlan_element_data element;
- i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- f->macaddr, NULL);
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, laa_macaddr);
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
+ if (ret) {
+ /* some older FW has a different default */
+ element.flags |=
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(hw, vsi->seid,
+ &element, 1, NULL);
}
+
+ i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_WOL,
+ laa_macaddr, NULL);
}
+
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -8484,8 +8992,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get vsi bw info, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't get vsi bw info, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
}
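
The reworked LAA handling holds mac_filter_list_lock only long enough to copy the address out of the list, then issues the AdminQ removals and the MAC address write with the lock dropped. A hedged sketch of that snapshot-under-lock pattern, with a pthread mutex standing in for the BH spinlock:

	#include <pthread.h>
	#include <string.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned char shared_mac[6];	/* stands in for the filter list */

	static void snapshot_then_program(void (*program_hw)(const unsigned char *))
	{
		unsigned char mac[6];

		pthread_mutex_lock(&list_lock);
		memcpy(mac, shared_mac, sizeof(mac));	/* copy under the lock */
		pthread_mutex_unlock(&list_lock);

		program_hw(mac);	/* slow, possibly sleeping work, lock dropped */
	}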
@@ -8536,10 +9045,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
}
+ spin_lock_bh(&vsi->mac_filter_list_lock);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan,
f->is_vf, f->is_netdev);
- i40e_sync_vsi_filters(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ i40e_sync_vsi_filters(vsi, false);
i40e_vsi_delete(vsi);
i40e_vsi_free_q_vectors(vsi);
@@ -8615,6 +9127,11 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
goto vector_setup_out;
}
+ /* In Legacy mode, we do not have to get any other vector since we
+ * piggyback on the misc/ICR0 for queue interrupts.
+ */
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return ret;
if (vsi->num_q_vectors)
vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
vsi->num_q_vectors, vsi->idx);
@@ -8658,7 +9175,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev,
- "failed to get tracking for %d queues for VSI %d err=%d\n",
+ "failed to get tracking for %d queues for VSI %d err %d\n",
vsi->alloc_queue_pairs, vsi->seid, ret);
goto err_vsi;
}
@@ -8759,8 +9276,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (veb) {
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
dev_info(&vsi->back->pdev->dev,
- "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
- __func__);
+ "New VSI creation error, uplink seid of LAN VSI expected.\n");
return NULL;
}
/* We come up by default in VEPA mode if SRIOV is not
@@ -8857,6 +9373,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
break;
}
+ if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+ (vsi->type == I40E_VSI_VMDQ2)) {
+ ret = i40e_vsi_config_rss(vsi);
+ }
return vsi;
err_rings:
@@ -8896,8 +9416,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw config failed, aq_err=%d\n",
- hw->aq.asq_last_status);
+ "query veb bw config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -8905,8 +9426,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw ets config failed, aq_err=%d\n",
- hw->aq.asq_last_status);
+ "query veb bw ets config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -9090,36 +9612,40 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = false;
+ struct i40e_pf *pf = veb->pf;
+ bool is_default = veb->pf->cur_promisc;
bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
- ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
veb->enabled_tc, is_default,
is_cloud, &veb->seid, NULL);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't add VEB, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "couldn't add VEB, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
/* get statistics counter */
- ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+ ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't get VEB statistics idx, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB statistics idx, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
- dev_info(&veb->pf->pdev->dev,
- "couldn't get VEB bw info, err %d, aq_err %d\n",
- ret, veb->pf->hw.aq.asq_last_status);
- i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+ dev_info(&pf->pdev->dev,
+ "couldn't get VEB bw info, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
}
@@ -9325,8 +9851,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
&next_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "get switch config failed %d aq_err=%x\n",
- ret, pf->hw.aq.asq_last_status);
+ "get switch config failed err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
kfree(aq_buf);
return -ENOENT;
}
@@ -9367,8 +9895,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't fetch switch config, err %d, aq_err %d\n",
- ret, pf->hw.aq.asq_last_status);
+ "couldn't fetch switch config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
i40e_pf_reset_stats(pf);
@@ -9397,6 +9926,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
@@ -9420,7 +9950,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_update_link_info(&pf->hw);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -9538,8 +10068,14 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
}
pf->queues_left = queues_left;
+ dev_dbg(&pf->pdev->dev,
+ "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
+ pf->hw.func_caps.num_tx_qp,
+ !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+ pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
+ pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
#ifdef I40E_FCOE
- dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+ dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
@@ -9607,12 +10143,19 @@ static void i40e_print_features(struct i40e_pf *pf)
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
buf += sprintf(buf, "DCB ");
+#if IS_ENABLED(CONFIG_VXLAN)
+ buf += sprintf(buf, "VxLAN ");
+#endif
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED)
buf += sprintf(buf, "FCOE ");
#endif
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ buf += sprintf(buf, "VEB ");
+ else
+ buf += sprintf(buf, "VEPA ");
BUG_ON(buf > (string + INFO_STRING_LEN));
dev_info(&pf->pdev->dev, "%s\n", string);
@@ -9633,14 +10176,15 @@ static void i40e_print_features(struct i40e_pf *pf)
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- unsigned long ioremap_len;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
+ u16 wol_nvm_bits;
u16 link_status;
- int err = 0;
+ int err;
u32 len;
u32 i;
+ u8 set_fc_aq_fail;
err = pci_enable_device_mem(pdev);
if (err)
@@ -9686,15 +10230,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &pf->hw;
hw->back = pf;
- ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
- I40E_MAX_CSR_SPACE);
+ pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+ I40E_MAX_CSR_SPACE);
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
if (!hw->hw_addr) {
err = -EIO;
dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
(unsigned int)pci_resource_start(pdev, 0),
- (unsigned int)pci_resource_len(pdev, 0), err);
+ pf->ioremap_len, err);
goto err_ioremap;
}
hw->vendor_id = pdev->vendor;
@@ -9743,7 +10287,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_shared_code(hw);
if (err) {
- dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
goto err_pf_reset;
}
@@ -9751,7 +10296,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->hw.fc.requested_mode = I40E_FC_NONE;
err = i40e_init_adminq(hw);
- dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+
+ /* provide nvm, fw, api versions */
+ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ i40e_nvm_version_str(hw));
+
if (err) {
dev_info(&pdev->dev,
"The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
@@ -9851,10 +10402,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&pf->service_task, i40e_service_task);
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
- pf->link_check_timeout = jiffies;
- /* WoL defaults to disabled */
- pf->wol_en = false;
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
+ pf->wol_en = false;
+ else
+ pf->wol_en = true;
device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
/* set up the main switch operations */
@@ -9895,6 +10449,25 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+
+ /* Make sure flow control is set according to current settings */
+ err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on get_phy_cap\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on set_phy_config\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+ dev_dbg(&pf->pdev->dev,
+ "Set fc with err %s aq_err %s on get_link_info\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+
/* if FDIR VSI was set up, start it now */
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -9910,15 +10483,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_LINK_UPDOWN |
I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
if (err)
- dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
}
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -9981,35 +10558,82 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_fcoe_vsi_setup(pf);
#endif
- /* Get the negotiated link width and speed from PCI config space */
- pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+#define PCI_SPEED_SIZE 8
+#define PCI_WIDTH_SIZE 8
+ /* Devices on the IOSF bus do not have this information
+ * and will report PCI Gen 1 x 1 by default so don't bother
+ * checking them.
+ */
+ if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
+ char speed[PCI_SPEED_SIZE] = "Unknown";
+ char width[PCI_WIDTH_SIZE] = "Unknown";
- i40e_set_pci_config_data(hw, link_status);
+ /* Get the negotiated link width and speed from PCI config
+ * space
+ */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
+ &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ switch (hw->bus.speed) {
+ case i40e_bus_speed_8000:
+ strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ case i40e_bus_speed_5000:
+ strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ case i40e_bus_speed_2500:
+ strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ default:
+ break;
+ }
+ switch (hw->bus.width) {
+ case i40e_bus_width_pcie_x8:
+ strncpy(width, "8", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x4:
+ strncpy(width, "4", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x2:
+ strncpy(width, "2", PCI_WIDTH_SIZE); break;
+ case i40e_bus_width_pcie_x1:
+ strncpy(width, "1", PCI_WIDTH_SIZE); break;
+ default:
+ break;
+ }
- dev_info(&pdev->dev, "PCI-Express: %s %s\n",
- (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
- hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
- hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
- "Unknown"),
- (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
- hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
- hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
- hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
- "Unknown"));
+ dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
+ speed, width);
- if (hw->bus.width < i40e_bus_width_pcie_x8 ||
- hw->bus.speed < i40e_bus_speed_8000) {
- dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
- dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
}
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
- err);
+ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ /* get the supported phy types from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+ if (err)
+ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
+
+ /* Add a filter that prevents any VSI from transmitting Flow control
+ * frames. This stops a malicious VF from sending PAUSE or PFC frames
+ * and potentially controlling traffic for other PF/VF VSIs.
+ * The FW can still send Flow control frames if enabled.
+ */
+ i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+ pf->main_vsi_seid);
+
/* print a string summarizing features */
i40e_print_features(pf);
@@ -10057,6 +10681,7 @@ err_dma:
static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
i40e_status ret_code;
int i;
@@ -10064,6 +10689,10 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf);
+ /* Disable RSS in hw */
+ wr32(hw, I40E_PFQF_HENA(0), 0);
+ wr32(hw, I40E_PFQF_HENA(1), 0);
+
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
@@ -10180,7 +10809,7 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
int err;
u32 reg;
- dev_info(&pdev->dev, "%s\n", __func__);
+ dev_dbg(&pdev->dev, "%s\n", __func__);
if (pci_enable_device_mem(pdev)) {
dev_info(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
@@ -10220,13 +10849,13 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "%s\n", __func__);
+ dev_dbg(&pdev->dev, "%s\n", __func__);
if (test_bit(__I40E_SUSPENDED, &pf->state))
return;
rtnl_lock();
i40e_handle_reset_warning(pf);
- rtnl_lock();
+ rtnl_unlock();
}
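
The one-line fix in i40e_pci_error_resume() replaces a second rtnl_lock() call, which would deadlock on itself, with the matching rtnl_unlock(). A trivial sketch of the balanced acquire/release shape the fix restores, with a pthread mutex standing in for the RTNL lock:

	#include <pthread.h>

	static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;	/* stand-in */

	static void error_resume(void (*handle_reset)(void))
	{
		pthread_mutex_lock(&rtnl);
		handle_reset();
		pthread_mutex_unlock(&rtnl);	/* the fix: unlock, not a second lock */
	}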
/**
@@ -10247,6 +10876,19 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
+
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM,
+ (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC,
+ (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
i40e_clear_interrupt_scheme(pf);
if (system_state == SYSTEM_POWER_OFF) {
@@ -10267,9 +10909,6 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
- i40e_fdir_teardown(pf);
rtnl_lock();
i40e_prep_for_reset(pf);
@@ -10302,9 +10941,7 @@ static int i40e_resume(struct pci_dev *pdev)
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
- "%s: Cannot enable PCI device from suspend\n",
- __func__);
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 554e49d02683..6100cdd9ad13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
/* Switching to words (sr_size contains power of 2KB) */
- nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
/* Write the address and start reading */
- sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
- (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
/* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -212,6 +212,74 @@ read_nvm_exit:
}
/**
+ * i40e_read_nvm_aq - Read Shadow RAM via AQ
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ i40e_status ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write fail error: tried to write %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, &cmd_details);
+
+ return ret_code;
+}
+
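
As a sanity check on the third guard above, a standalone sketch (assuming I40E_SR_SECTOR_SIZE_IN_WORDS is 2048, i.e. one 4KB sector of 16-bit words):

	/* A request spans two sectors when its first and last words fall
	 * in different 4KB-aligned sectors; such a request is rejected
	 * above.
	 */
	static bool spans_two_sectors(u32 offset, u16 words)
	{
		u32 sector = 2048; /* assumed I40E_SR_SECTOR_SIZE_IN_WORDS */

		/* e.g. offset = 2040, words = 16: words 2040..2055
		 * straddle the boundary at 2048, so this returns true.
		 */
		return ((offset + words - 1) / sector) != (offset / sector);
	}
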
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ *data = le16_to_cpu(*(__le16 *)data);
+
+ return ret_code;
+}
+
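
The le16_to_cpu() above matters only on big-endian hosts; a hypothetical helper making the byte order explicit:

	/* Shadow RAM words are stored little-endian; assembling the
	 * bytes explicitly yields a host-order value on any endianness.
	 */
	static u16 nvm_raw_to_cpu(const u8 *raw)
	{
		return (u16)raw[0] | ((u16)raw[1] << 8);
	}
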
+/**
* i40e_read_nvm_word - Reads Shadow RAM
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -222,7 +290,18 @@ read_nvm_exit:
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- return i40e_read_nvm_word_srctl(hw, offset, data);
+ enum i40e_status_code ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ }
+ return ret_code;
}
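
A hypothetical caller of the new dispatch (only the i40e_* names come from the driver): the acquire/release pair is handled inside i40e_read_nvm_word() on the AQ path, so existing callers stay unchanged:

	static i40e_status read_one_word_example(struct i40e_hw *hw, u16 off)
	{
		u16 word;

		/* Works on both paths: AQ-based access acquires and
		 * releases the NVM resource internally; SRCTL access
		 * needs neither.
		 */
		return i40e_read_nvm_word(hw, off, &word);
	}
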
/**
@@ -257,6 +336,63 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the Shadow RAM via
+ * i40e_read_nvm_aq(). The caller takes NVM ownership before the read
+ * and releases it afterwards.
+ **/
+static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code;
+ u16 read_size = *words;
+ bool last_cmd = false;
+ u16 words_read = 0;
+ u16 i = 0;
+
+ do {
+ /* Calculate the number of words to read in this step.
+ * The FVL AQ does not allow reading more than one page at a
+ * time or crossing page boundaries.
+ */
+ if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+ read_size = min(*words,
+ (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+ (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+ else
+ read_size = min((*words - words_read),
+ I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+ /* Check if this is the last command and, if so, set the flag */
+ if ((words_read + read_size) >= *words)
+ last_cmd = true;
+
+ ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+ data + words_read, last_cmd);
+ if (ret_code)
+ goto read_nvm_buffer_aq_exit;
+
+ /* Increment counter for words already read and move offset to
+ * new read location
+ */
+ words_read += read_size;
+ offset += read_size;
+ } while (words_read < *words);
+
+ for (i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+ *words = words_read;
+ return ret_code;
+}
+
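
The chunking rule above splits a read at 4KB sector boundaries; a worked sketch (sector size of 2048 words assumed):

	/* e.g. offset = 2000, *words = 3000:
	 *   step 1: offset unaligned -> read 2048 - (2000 % 2048) = 48
	 *   step 2: offset = 2048, aligned -> read min(2952, 2048) = 2048
	 *   step 3: offset = 4096, aligned -> read the remaining 904
	 */
	static u16 next_chunk_words(u16 offset, u16 remaining)
	{
		u16 sector = 2048; /* assumed I40E_SR_SECTOR_SIZE_IN_WORDS */

		if (offset % sector)
			return min(remaining, (u16)(sector - (offset % sector)));
		return min(remaining, sector);
	}
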
+/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -270,7 +406,19 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
- return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ enum i40e_status_code ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+ return ret_code;
}
/**
@@ -289,6 +437,10 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
bool last_command)
{
i40e_status ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
@@ -314,7 +466,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, NULL);
+ data, last_command, &cmd_details);
return ret_code;
}
@@ -332,7 +484,7 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
- i40e_status ret_code = 0;
+ i40e_status ret_code;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
@@ -412,13 +564,16 @@ i40e_calc_nvm_checksum_exit:
**/
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ i40e_status ret_code;
u16 checksum;
+ __le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- if (!ret_code)
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
- 1, &checksum, true);
+ 1, &le_sum, true);
+ }
return ret_code;
}
@@ -463,25 +618,31 @@ i40e_validate_nvm_checksum_exit:
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno);
+ int *perrno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno);
+ int *perrno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+ u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -491,7 +652,7 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
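
For reference, a sketch of how a userland config word splits into module pointer and transaction; the mask and shift values below are assumptions (an 8-bit module pointer in the low byte, transaction bits above it), the real ones come from I40E_NVM_MOD_PNT_MASK and I40E_NVM_TRANS_*:

	/* e.g. config = 0x0342 -> module pointer 0x42, transaction 0x3 */
	static void decode_config_example(u32 config, u8 *module, u8 *trans)
	{
		*module = (u8)(config & 0xFF);       /* assumed mod-pointer mask */
		*trans  = (u8)((config >> 8) & 0xF); /* assumed transaction field */
	}
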
-static char *i40e_nvm_update_state_str[] = {
+static const char * const i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
"I40E_NVMUPD_READ_SNT",
@@ -505,6 +666,9 @@ static char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_CSUM_CON",
"I40E_NVMUPD_CSUM_SA",
"I40E_NVMUPD_CSUM_LCB",
+ "I40E_NVMUPD_STATUS",
+ "I40E_NVMUPD_EXEC_AQ",
+ "I40E_NVMUPD_GET_AQ_RESULT",
};
/**
@@ -512,30 +676,60 @@ static char *i40e_nvm_update_state_str[] = {
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* Dispatches command depending on what update state is current
**/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
i40e_status status;
+ enum i40e_nvmupd_cmd upd_cmd;
/* assume success */
- *errno = 0;
+ *perrno = 0;
+
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->aq.nvm_release_on_done);
+
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
+ }
+
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ bytes[0] = hw->nvmupd_state;
+ return 0;
+ }
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
- status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_STATE_READING:
- status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_STATE_WRITING:
- status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ status = I40E_ERR_NOT_READY;
+ *perrno = -EBUSY;
break;
default:
@@ -543,7 +737,7 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: no such state %d\n", hw->nvmupd_state);
status = I40E_NOT_SUPPORTED;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
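
For orientation, a sketch of the wait-state transitions this hunk and the ones below introduce (summarized from the patch, not driver source):

	/* INIT    --WRITE_SA / WRITE_ERA / CSUM_SA ok-->  INIT_WAIT
	 * INIT    --WRITE_SNT ok-->                       WRITE_WAIT
	 * WRITING --WRITE_CON / CSUM_CON ok-->            WRITE_WAIT
	 * WRITING --WRITE_LCB / CSUM_LCB ok-->            INIT_WAIT
	 * *_WAIT  --any new command-->                    -EBUSY
	 */
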
@@ -554,28 +748,28 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
}
break;
@@ -583,10 +777,10 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_READ_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
if (status)
i40e_release_nvm(hw);
else
@@ -597,70 +791,83 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
case I40E_NVMUPD_WRITE_ERA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
- if (status)
+ status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
}
break;
case I40E_NVMUPD_WRITE_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- if (status)
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
}
break;
case I40E_NVMUPD_WRITE_SNT:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status)
i40e_release_nvm(hw);
else
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
case I40E_NVMUPD_CSUM_SA:
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
- *errno = i40e_aq_rc_to_posix(status,
+ *perrno = i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
- *errno = hw->aq.asq_last_status ?
+ *perrno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status) :
-EIO;
i40e_release_nvm(hw);
} else {
hw->aq.nvm_release_on_done = true;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
break;
+ case I40E_NVMUPD_EXEC_AQ:
+ status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
+ break;
+
+ case I40E_NVMUPD_GET_AQ_RESULT:
+ status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
+ break;
+
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
i40e_nvm_update_state_str[upd_cmd]);
status = I40E_ERR_NVM;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
@@ -671,28 +878,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
- i40e_status status;
+ i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
switch (upd_cmd) {
case I40E_NVMUPD_READ_SA:
case I40E_NVMUPD_READ_CON:
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_READ_LCB:
- status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
@@ -702,7 +909,7 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
"NVMUPD: bad cmd %s in reading state.\n",
i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
return status;
@@ -713,55 +920,68 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
- i40e_status status;
+ i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
bool retry_attempt = false;
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (!status)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
break;
case I40E_NVMUPD_WRITE_LCB:
- status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
- if (!status)
+ status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
+ i40e_aq_rc_to_posix(status,
+ hw->aq.asq_last_status) :
+ -EIO;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
break;
case I40E_NVMUPD_CSUM_CON:
status = i40e_update_nvm_checksum(hw);
if (status) {
- *errno = hw->aq.asq_last_status ?
+ *perrno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status) :
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
case I40E_NVMUPD_CSUM_LCB:
status = i40e_update_nvm_checksum(hw);
- if (status)
- *errno = hw->aq.asq_last_status ?
+ if (status) {
+ *perrno = hw->aq.asq_last_status ?
i40e_aq_rc_to_posix(status,
hw->aq.asq_last_status) :
-EIO;
- else
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ } else {
hw->aq.nvm_release_on_done = true;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
break;
default:
@@ -769,7 +989,7 @@ retry:
"NVMUPD: bad cmd %s in writing state.\n",
i40e_nvm_update_state_str[upd_cmd]);
status = I40E_NOT_SUPPORTED;
- *errno = -ESRCH;
+ *perrno = -ESRCH;
break;
}
@@ -812,21 +1032,22 @@ retry:
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* Return one of the valid command types or I40E_NVMUPD_INVALID
**/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno)
+ int *perrno)
{
enum i40e_nvmupd_cmd upd_cmd;
- u8 transaction;
+ u8 module, transaction;
/* anything that doesn't match a recognized case is an error */
upd_cmd = I40E_NVMUPD_INVALID;
transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
/* limits on data size */
if ((cmd->data_size < 1) ||
@@ -834,7 +1055,7 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_validate_command data_size %d\n",
cmd->data_size);
- *errno = -EFAULT;
+ *perrno = -EFAULT;
return I40E_NVMUPD_INVALID;
}
@@ -853,6 +1074,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
case I40E_NVM_SA:
upd_cmd = I40E_NVMUPD_READ_SA;
break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
}
break;
@@ -882,21 +1109,155 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
case (I40E_NVM_CSUM|I40E_NVM_LCB):
upd_cmd = I40E_NVMUPD_CSUM_LCB;
break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
}
break;
}
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
- i40e_nvm_update_state_str[upd_cmd],
- hw->nvmupd_state,
- hw->aq.nvm_release_on_done);
- if (upd_cmd == I40E_NVMUPD_INVALID) {
- *errno = -EFAULT;
+ return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ i40e_status status;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command returns %d errno %d\n",
- upd_cmd, *errno);
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
}
- return upd_cmd;
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
+ }
+
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
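
The buffer that i40e_nvmupd_exec_aq() expects from userland is an AQ descriptor immediately followed by any data payload. A sketch of assembling such a buffer; build_exec_buf and its parameters are illustrative, only struct i40e_aq_desc is from the driver:

	/* Layout expected by the exec path:
	 *   bytes[0 .. sizeof(struct i40e_aq_desc) - 1]   the descriptor
	 *   bytes[sizeof(struct i40e_aq_desc) .. ]        optional AQ data
	 * cmd->data_size must be at least sizeof(struct i40e_aq_desc).
	 */
	static int build_exec_buf(u8 *bytes, size_t size,
				  const struct i40e_aq_desc *desc,
				  const void *payload, u16 payload_len)
	{
		size_t desc_len = sizeof(struct i40e_aq_desc);

		if (size < desc_len + payload_len)
			return -EINVAL;
		memcpy(bytes, desc, desc_len);                  /* descriptor first */
		memcpy(bytes + desc_len, payload, payload_len); /* then the data */
		return 0;
	}
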
+/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return I40E_ERR_PARAM;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return 0;
}
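
A worked example of the two-region copy above, assuming sizeof(struct i40e_aq_desc) is 32 bytes: a request with offset 10 and data_size 40 takes 22 bytes from the write-back descriptor and the remaining 18 from the data buffer. A sketch of the split:

	static void split_result_copy(u32 offset, u32 size,
				      u32 *from_desc, u32 *from_buff)
	{
		u32 desc_len = 32; /* assumed sizeof(struct i40e_aq_desc) */

		/* offset = 10, size = 40 -> 22 bytes from the descriptor,
		 * then 18 bytes from the start of the data buffer
		 */
		*from_desc = (offset < desc_len) ?
			     min(desc_len - offset, size) : 0;
		*from_buff = size - *from_desc;
	}
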
/**
@@ -904,14 +1265,15 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* cmd structure contains identifiers and data buffer
**/
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
+ struct i40e_asq_cmd_details cmd_details;
i40e_status status;
u8 module, transaction;
bool last;
@@ -920,8 +1282,11 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- bytes, last, NULL);
+ bytes, last, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
@@ -929,7 +1294,7 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_read status %d aq %d\n",
status, hw->aq.asq_last_status);
- *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
return status;
@@ -939,23 +1304,28 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
* i40e_nvmupd_nvm_erase - Erase an NVM module
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- int *errno)
+ int *perrno)
{
i40e_status status = 0;
+ struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- last, NULL);
+ last, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
@@ -963,7 +1333,7 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_erase status %d aq %d\n",
status, hw->aq.asq_last_status);
- *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
return status;
@@ -974,15 +1344,16 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
- * @errno: pointer to return error code
+ * @perrno: pointer to return error code
*
* module, offset, data_size and data are in cmd structure
**/
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno)
+ u8 *bytes, int *perrno)
{
i40e_status status = 0;
+ struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
bool last;
@@ -990,8 +1361,12 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
status = i40e_aq_update_nvm(hw, module, cmd->offset,
- (u16)cmd->data_size, bytes, last, NULL);
+ (u16)cmd->data_size, bytes, last,
+ &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
@@ -999,7 +1374,7 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write status %d aq %d\n",
status, hw->aq.asq_last_status);
- *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index ad802dd0f67a..5b6feb7edeb1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -35,7 +35,7 @@
#include <linux/highuid.h>
/* get readq/writeq support for 32 bit kernels, use the low-first version */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
/* File to be the magic between shared code and
* actual OS primitives
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 7b34f1e660ea..bb9d583e5416 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,6 +58,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
@@ -245,7 +258,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+i40e_status i40e_update_link_info(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
u32 *max_bw, u32 *min_bw, bool *min_valid,
@@ -308,4 +322,6 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
void *buff, u16 *ret_buff_size,
u8 *ret_next_table, u32 *ret_next_index,
struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b7725dec3..565ca7c835bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -43,9 +43,8 @@
#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
- I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
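
A note on why only the V1 macro switched to BIT(): BIT(n) expands to a single-bit mask (1 << n), while TSYNTYPE is a two-bit field whose V2 encoding is the value 2. An illustrative sketch (EXAMPLE_* names are not driver definitions):

	#define EXAMPLE_TSYNTYPE_SHIFT 1                           /* illustrative */
	#define EXAMPLE_TSYNTYPE_V1 BIT(EXAMPLE_TSYNTYPE_SHIFT)    /* field value 1 */
	#define EXAMPLE_TSYNTYPE_V2 (2 << EXAMPLE_TSYNTYPE_SHIFT)  /* field value 2 */
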
/**
@@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
- if (!(prttsyn_stat & (1 << index)))
+ if (!(prttsyn_stat & BIT(index)))
return;
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
@@ -619,9 +618,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
/* Attempt to register the clock before enabling the hardware. */
pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
- if (IS_ERR(pf->ptp_clock)) {
+ if (IS_ERR(pf->ptp_clock))
return PTR_ERR(pf->ptp_clock);
- }
/* clear the hwtstamp settings here during clock create, instead of
* during regular init, so that we can maintain settings across a
@@ -676,8 +674,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
struct timespec64 ts;
u32 regval;
- dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
- netdev->name);
+ if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+ dev_info(&pf->pdev->dev, "PHC enabled\n");
pf->flags |= I40E_FLAG_PTP;
/* Ensure the clocks are running. */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 522d6df51330..dc0402fe3370 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -873,6 +873,13 @@
#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
@@ -3366,4 +3373,1933 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
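+/*
+ * Illustrative sketch only (not part of the hardware spec): a multi-field
+ * register such as VPLAN_QBASE is typically built by shifting each value
+ * into its field and masking it.  Assuming the driver's wr32() MMIO write
+ * accessor and hypothetical first_q/num_q/vf_id locals:
+ *
+ *	u32 qbase = ((first_q << I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) &
+ *		     I40E_VPLAN_QBASE_VFFIRSTQ_MASK) |
+ *		    ((num_q << I40E_VPLAN_QBASE_VFNUMQ_SHIFT) &
+ *		     I40E_VPLAN_QBASE_VFNUMQ_MASK) |
+ *		    I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK;
+ *	wr32(hw, I40E_VPLAN_QBASE(vf_id), qbase);
+ */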
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
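+/*
+ * Illustrative sketch only: every field above comes as a SHIFT/MASK pair,
+ * with I40E_MASK(mask, shift) placing the field mask at its bit position.
+ * Assuming the driver's rd32() MMIO read accessor, a single-bit field such
+ * as the flash-lock indicator would be tested as:
+ *
+ *	u32 fla = rd32(hw, I40E_GLNVM_FLA);
+ *	bool locked = !!(fla & I40E_GLNVM_FLA_LOCKED_MASK);
+ */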
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
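+/* Many GLPES statistics come as LO/HI pairs: a 32-bit low word plus a
+ * 16-bit high word (0xFFFF mask), i.e. 48-bit counters.  A hypothetical
+ * read helper, assuming the rd32() accessor and a pf_id index:
+ *
+ *	u64 octs = rd32(hw, I40E_GLPES_PFIP4RXOCTSLO(pf_id));
+ *	octs |= (u64)(rd32(hw, I40E_GLPES_PFIP4RXOCTSHI(pf_id)) &
+ *		      I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK) << 32;
+ */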
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
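Every field in the register block above follows the same shift/mask idiom: a *_SHIFT constant plus a *_MASK built with I40E_MASK(mask, shift), which i40e_type.h later defines as (mask << shift). As a minimal sketch of how a caller extracts and rewrites such a field, assuming the driver's rd32()/wr32() accessors and struct i40e_hw, and using PEHSIZE from I40E_PFQF_CTL_2 purely as an example:

/* Hedged illustration only, not driver code: read-modify-write of one
 * register field using the SHIFT/MASK pairs defined above.
 */
static u32 pfqf_ctl2_get_pehsize(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_PFQF_CTL_2);

	return (reg & I40E_PFQF_CTL_2_PEHSIZE_MASK) >>
	       I40E_PFQF_CTL_2_PEHSIZE_SHIFT;
}

static void pfqf_ctl2_set_pehsize(struct i40e_hw *hw, u32 pehsize)
{
	u32 reg = rd32(hw, I40E_PFQF_CTL_2);

	reg &= ~I40E_PFQF_CTL_2_PEHSIZE_MASK;
	reg |= (pehsize << I40E_PFQF_CTL_2_PEHSIZE_SHIFT) &
	       I40E_PFQF_CTL_2_PEHSIZE_MASK;
	wr32(hw, I40E_PFQF_CTL_2, reg);
}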
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9a4f2bc70cd2..635b3ac17877 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -464,11 +464,12 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
- if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
- rx_desc->wb.qword0.hi_dword.fd_id);
+ pf->fd_inv);
/* Check if the programming error is for ATR.
* If so, auto disable ATR and set a state for
@@ -509,8 +510,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
dev_info(&pdev->dev,
"FD filter programming failed due to incorrect filter parameters\n");
}
- } else if (error ==
- (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
rx_desc->wb.qword0.hi_dword.fd_id);
@@ -602,27 +602,13 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
-/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
u32 head, tail;
@@ -636,50 +622,6 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
return 0;
}
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
- u32 tx_done = tx_ring->stats.packets;
- u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
- u32 tx_pending = i40e_get_tx_pending(tx_ring);
- struct i40e_pf *pf = tx_ring->vsi->back;
- bool ret = false;
-
- clear_check_for_tx_hang(tx_ring);
-
- /* Check for a hung queue, but be thorough. This verifies
- * that a transmit has been completed since the previous
- * check AND there is at least one packet pending. The
- * ARMED bit is set to indicate a potential hang. The
- * bit is cleared if a pause frame is received to remove
- * false hang detection due to PFC or 802.3x frames. By
- * requiring this to fail twice we avoid races with
- * PFC clearing the ARMED bit and conditions where we
- * run the check_tx_hang logic with a transmit completion
- * pending but without time to complete it yet.
- */
- if ((tx_done_old == tx_done) && tx_pending) {
- /* make sure it is true for two checks in a row */
- ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
- &tx_ring->state);
- } else if (tx_done_old == tx_done &&
- (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
- if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
- dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
- tx_pending, tx_ring->queue_index);
- pf->tx_sluggish_count++;
- } else {
- /* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_done;
- clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
- }
-
- return ret;
-}
-
#define WB_STRIDE 0x3
/**
@@ -785,42 +727,21 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
- /* check to see if there are any non-cache aligned descriptors
- * waiting to be written back, and kick the hardware to force
- * them to be written back in case of napi polling
- */
- if (budget &&
- !((i & WB_STRIDE) == WB_STRIDE) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
- else
- tx_ring->arm_wb = false;
-
- if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
- /* schedule immediate reset if we believe we hung */
- dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
- " VSI <%d>\n"
- " Tx Queue <%d>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n",
- tx_ring->vsi->seid,
- tx_ring->queue_index,
- tx_ring->next_to_use, i);
-
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
- dev_info(tx_ring->dev,
- "tx hang detected on queue %d, reset requested\n",
- tx_ring->queue_index);
-
- /* do not fire the reset immediately, wait for the stack to
- * decide we are truly stuck, also prevents every queue from
- * simultaneously requesting a reset
+ if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ unsigned int j = 0;
+
+ /* check to see if there are < 4 descriptors
+ * waiting to be written back, then kick the hardware to force
+ * them to be written back in case we stay in NAPI.
+	 * In this mode on X722 we do not enable interrupts.
*/
+ j = i40e_get_tx_pending(tx_ring);
- /* the adapter is about to reset, no point in enabling polling */
- budget = 1;
+ if (budget &&
+ ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -852,23 +773,50 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
* @q_vector: the vector on which to force writeback
*
**/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
- /* allow 00 to be written to the index */
-
- wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
- val);
+ u16 flags = q_vector->tx.ring[0].flags;
+
+ if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ u32 val;
+
+ if (q_vector->arm_wb_state)
+ return;
+
+ val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1),
+ val);
+ q_vector->arm_wb_state = true;
+ } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ } else {
+ u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+ }
}
/**
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
+ * Returns true if ITR changed, false if not
+ *
* Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic
@@ -877,22 +825,33 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
+ struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr;
int bytes_per_int;
+ int usecs;
if (rc->total_packets == 0 || !rc->itr)
- return;
+ return false;
/* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
+ * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (18000 ints/s)
+ * > 40000 Rx packets per second (8000 ints/s)
+ *
+	 * The math works out because the divisor is in 10^(-6), which
+	 * turns the bytes/us input value into MB/s values. Note that
+	 * the interval must be in usecs, since the ITR registers are
+	 * written in 2 usec increments, and that we use the smoothed
+	 * values the countdown timer gives us.
*/
- bytes_per_int = rc->total_bytes / rc->itr;
- switch (rc->itr) {
+ usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+ bytes_per_int = rc->total_bytes / usecs;
+
+ switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
new_latency_range = I40E_LOW_LATENCY;
@@ -904,58 +863,52 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
+ case I40E_ULTRA_LATENCY:
+ default:
if (bytes_per_int <= 20)
- rc->latency_range = I40E_LOW_LATENCY;
+ new_latency_range = I40E_LOW_LATENCY;
break;
}
+ /* this is to adjust RX more aggressively when streaming small
+ * packets. The value of 40000 was picked as it is just beyond
+ * what the hardware can receive per second if in low latency
+ * mode.
+ */
+#define RX_ULTRA_PACKET_RATE 40000
+
+ if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+ (&qv->rx == rc))
+ new_latency_range = I40E_ULTRA_LATENCY;
+
+ rc->latency_range = new_latency_range;
+
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_100K;
+ new_itr = I40E_ITR_50K;
break;
case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K;
break;
case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_18K;
+ break;
+ case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K;
break;
default:
break;
}
- if (new_itr != rc->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * rc->itr) /
- ((9 * new_itr) + rc->itr);
- rc->itr = new_itr & I40E_MAX_ITR;
- }
-
rc->total_bytes = 0;
rc->total_packets = 0;
-}
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
- u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
- struct i40e_hw *hw = &q_vector->vsi->back->hw;
- u32 reg_addr;
- u16 old_itr;
-
- reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr)
- wr32(hw, reg_addr, q_vector->rx.itr);
-
- reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr)
- wr32(hw, reg_addr, q_vector->tx.itr);
+ if (new_itr != rc->itr) {
+ rc->itr = new_itr;
+ return true;
+ }
+
+ return false;
}
/**
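The rewritten i40e_set_new_dynamic_itr() above first converts the current ITR register value (stored in 2 usec units) into elapsed microseconds, then classifies bytes-per-usec (numerically MB/s) against the thresholds listed in the comment. A self-contained sketch of just the range walk, with hypothetical names standing in for the I40E_*_LATENCY enums:

/* Hedged, self-contained sketch of the throughput classification used
 * above; bytes_per_usec is numerically MB/s because the divisor is in
 * microseconds. Names are stand-ins for the driver's enums.
 */
enum latency_range { LOWEST, LOW, BULK };

static enum latency_range classify(enum latency_range cur,
				   unsigned int bytes_per_usec)
{
	switch (cur) {
	case LOWEST:
		return bytes_per_usec > 10 ? LOW : LOWEST;
	case LOW:
		if (bytes_per_usec > 20)
			return BULK;
		return bytes_per_usec <= 10 ? LOWEST : LOW;
	case BULK:
	default:
		return bytes_per_usec <= 20 ? LOW : BULK;
	}
}

The ULTRA range is then layered on top of this result, for Rx only, when (total_packets * 10^6) / usecs exceeds RX_ULTRA_PACKET_RATE.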
@@ -1001,6 +954,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!dev)
return -ENOMEM;
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(tx_ring->tx_bi);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!tx_ring->tx_bi)
@@ -1161,6 +1116,8 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
struct device *dev = rx_ring->dev;
int bi_size;
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(rx_ring->rx_bi);
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!rx_ring->rx_bi)
@@ -1341,16 +1298,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag)
{
struct i40e_q_vector *q_vector = rx_ring->q_vector;
- struct i40e_vsi *vsi = rx_ring->vsi;
- u64 flags = vsi->back->flags;
if (vlan_tag & VLAN_VID_MASK)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (flags & I40E_FLAG_IN_NETPOLL)
- netif_rx(skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -1386,7 +1338,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return;
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
/* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1353,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true;
if (ipv4 &&
- (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
- rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1428,7 +1380,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of the IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* be set if the *inner* header is UDP
*/
- if (ipv4_tunnel) {
+ if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+ (ipv4_tunnel)) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1516,7 +1469,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- const int current_node = numa_node_id();
+ const int current_node = numa_mem_id();
struct i40e_vsi *vsi = rx_ring->vsi;
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
@@ -1543,7 +1496,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1584,8 +1537,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1594,6 +1547,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
cleaned_count++;
if (rx_hbo || rx_sph) {
int len;
+
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
else
@@ -1637,7 +1591,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1601,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
continue;
}
@@ -1669,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1730,7 +1684,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1753,7 +1707,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,17 +1725,14 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++;
continue;
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1802,7 +1753,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1826,6 +1777,96 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
return total_rx_packets;
}
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+ (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+ return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ bool rx = false, tx = false;
+ u32 rxval, txval;
+ int vector;
+
+ vector = (q_vector->v_idx + vsi->base_vector);
+
+	/* avoid the dynamic calculation if we are in countdown mode OR
+	 * if all dynamic ITR is disabled
+ */
+ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+ if (q_vector->itr_countdown > 0 ||
+ (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ goto enable_int;
+ }
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
+ }
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+ txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+ }
+
+ if (rx || tx) {
+ /* get the higher of the two ITR adjustments and
+ * use the same value for both ITR registers
+ * when in adaptive mode (Rx and/or Tx)
+ */
+ u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+ q_vector->tx.itr = q_vector->rx.itr = itr;
+ txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+ tx = true;
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+ rx = true;
+ }
+
+ /* only need to enable the interrupt once, but need
+ * to possibly update both ITR values
+ */
+ if (rx) {
+ /* set the INTENA_MSK_MASK so that this first write
+ * won't actually enable the interrupt, instead just
+		 * updating the ITR (bit 31 in both the PF and VF registers)
+ */
+ rxval |= BIT(31);
+ /* don't check _DOWN because interrupt isn't being enabled */
+ wr32(hw, INTREG(vector - 1), rxval);
+ }
+
+enable_int:
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, INTREG(vector - 1), txval);
+
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ else
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
+}
+
/**
* i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
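Note how i40e_update_enable_itr() above issues at most two writes to the same DYN_CTLN register: the Rx value carries BIT(31), the INTENA_MSK bit, so the first write only updates the ITR interval, while the Tx value written last both updates its ITR and re-enables the vector. A hedged fragment restating the two values; itr_rx, itr_tx and the surrounding variables are assumed from context:

/* Hedged fragment, not a complete function: the two values written by
 * the code above. itr_rx/itr_tx are ITR intervals in 2 usec units.
 */
u32 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr_rx) | BIT(31);
u32 txval = i40e_buildreg_itr(I40E_TX_ITR, itr_tx);

wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), rxval); /* ITR update only */
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), txval); /* ITR + re-enable  */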
@@ -1844,7 +1885,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
- int cleaned;
+ int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
@@ -1857,58 +1898,62 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
arm_wb |= ring->arm_wb;
+ ring->arm_wb = false;
}
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ goto tx_only;
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
+ int cleaned;
+
if (ring_is_ps_enabled(ring))
cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
else
cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+ work_done += cleaned;
/* if we didn't clean as many as budgeted, we must be done */
clean_complete &= (budget_per_ring != cleaned);
}
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+tx_only:
if (arm_wb)
i40e_force_wb(vsi, q_vector);
return budget;
}
+ if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ q_vector->arm_wb_state = false;
+
/* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete(napi);
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
- ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- i40e_update_dynamic_itr(q_vector);
-
- if (!test_bit(__I40E_DOWN, &vsi->state)) {
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
- i40e_irq_dynamic_enable(vsi,
- q_vector->v_idx + vsi->base_vector);
- } else {
- struct i40e_hw *hw = &vsi->back->hw;
- /* We re-enable the queue 0 cause, but
- * don't worry about dynamic_enable
- * because we left it on for the other
- * possible interrupts during napi
- */
- u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
- qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_RQCTL(0), qval);
-
- qval = rd32(hw, I40E_QINT_TQCTL(0));
- qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), qval);
-
- i40e_irq_dynamic_enable_icr0(vsi->back);
- }
+ napi_complete_done(napi, work_done);
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_update_enable_itr(vsi, q_vector);
+ } else { /* Legacy mode */
+ struct i40e_hw *hw = &vsi->back->hw;
+ /* We re-enable the queue 0 cause, but
+ * don't worry about dynamic_enable
+ * because we left it on for the other
+ * possible interrupts during napi
+ */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+ qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_irq_dynamic_enable_icr0(vsi->back);
}
-
return 0;
}
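The conversion to napi_complete_done() above follows the standard NAPI contract: accumulate the Rx work actually performed and report it on completion rather than completing unconditionally. A generic sketch of that pattern, with example_clean_rx(), the ring variables and example_reenable_irq() as hypothetical stand-ins:

/* Hedged sketch of the generic pattern; example_clean_rx(), rx_ring0/1
 * and example_reenable_irq() are hypothetical stand-ins.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	int per_ring = max(budget / 2, 1);	/* fair share, at least 1 */

	work_done += example_clean_rx(rx_ring0, per_ring);
	work_done += example_clean_rx(rx_ring1, per_ring);

	if (work_done >= budget)
		return budget;		/* more work pending, poll again */

	/* done: report the actual work and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	example_reenable_irq();
	return 0;
}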
@@ -1982,6 +2027,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+ /* HW ATR eviction will take care of removing filters on FIN
+ * and RST packets.
+ */
+ if (th->fin || th->rst)
+ return;
+ }
tx_ring->atr_count++;
@@ -2037,6 +2089,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
@@ -2088,6 +2143,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
+
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
return -EINVAL;
@@ -2131,6 +2187,7 @@ out:
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to the u64 context descriptor (type/cmd/TSO/MSS)
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
@@ -2190,6 +2247,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: ptr to the u64 context descriptor (type/cmd/TSO/MSS)
*
* Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
**/
@@ -2232,6 +2290,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
* @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
@@ -2244,14 +2303,21 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ struct udphdr *oudph;
+ struct iphdr *oiph;
u32 l4_tunnel = 0;
if (skb->encapsulation) {
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
+ oudph = udp_hdr(skb);
+ oiph = ip_hdr(skb);
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
+ case IPPROTO_GRE:
+ l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+ break;
default:
return;
}
@@ -2285,6 +2351,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
*tx_flags &= ~I40E_TX_FLAGS_IPV4;
*tx_flags |= I40E_TX_FLAGS_IPV6;
}
+ if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+ (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
+ (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+ oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+ oiph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, 0);
+ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -2500,6 +2575,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
+ u16 desc_count = 0;
+ bool tail_bump = true;
+ bool do_rs = false;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2540,6 +2618,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -2559,6 +2639,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
+ desc_count++;
+
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
@@ -2573,34 +2655,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* Place RS bit on last descriptor of any packet that spans across the
- * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
- */
- if (((i & WB_STRIDE) != WB_STRIDE) &&
- (first <= &tx_ring->tx_bi[i]) &&
- (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
- I40E_TXD_QW1_CMD_SHIFT);
- } else {
- tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)I40E_TXD_CMD <<
- I40E_TXD_QW1_CMD_SHIFT);
- }
-
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
@@ -2610,12 +2664,71 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ /* Algorithm to optimize tail and RS bit setting:
+ * if xmit_more is supported
+ * if xmit_more is true
+ * do not update tail and do not mark RS bit.
+ * if xmit_more is false and last xmit_more was false
+ * if every packet spanned less than 4 desc
+ * then set RS bit on 4th packet and update tail
+ * on every packet
+ * else
+ * update tail and set RS bit on every packet.
+	 * if xmit_more is false and last xmit_more was true
+ * update tail and set RS bit.
+ *
+ * Optimization: wmb to be issued only in case of tail update.
+ * Also optimize the Descriptor WB path for RS bit with the same
+ * algorithm.
+ *
+	 * Note: If there are fewer than 4 packets
+	 * pending and interrupts were disabled, the service task will
+ * trigger a force WB.
+ */
+ if (skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index))) {
+ tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ tail_bump = false;
+ } else if (!skb->xmit_more &&
+ !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)) &&
+ (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+ (tx_ring->packet_stride < WB_STRIDE) &&
+ (desc_count < WB_STRIDE)) {
+ tx_ring->packet_stride++;
+ } else {
+ tx_ring->packet_stride = 0;
+ tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+ do_rs = true;
+ }
+ if (do_rs)
+ tx_ring->packet_stride = 0;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+ I40E_TX_DESC_CMD_EOP) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+
/* notify HW of packet */
- if (!skb->xmit_more ||
- netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)))
+ if (!tail_bump)
+ prefetchw(tx_desc + 1);
+
+ if (tail_bump) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
writel(i, tx_ring->tail);
+ }
return;
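The tail/RS algorithm documented in the hunk above reduces to three outcomes per transmitted packet. A hedged distillation of the decision, with the queue-stopped checks elided and locals standing in for the i40e_ring fields:

/* Hedged distillation of the decision above; queue-stopped checks are
 * elided and the locals stand in for the i40e_ring fields.
 */
if (xmit_more) {
	tail_bump = false;		/* batch: no tail write, no RS */
} else if (!last_xmit_more && packet_stride < WB_STRIDE &&
	   desc_count < WB_STRIDE) {
	packet_stride++;		/* tail write, EOP only */
} else {
	packet_stride = 0;		/* tail write plus RS bit */
	do_rs = true;
}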
@@ -2693,6 +2806,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u8 hdr_len = 0;
int tsyn;
int tso;
+
if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -2725,10 +2839,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- if (i40e_chk_linearize(skb, tx_flags))
+ if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
-
+ tx_ring->tx_stats.tx_linearize++;
+ }
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 0dc48dc9ca61..6779fb771d6a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -32,11 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
+#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
+#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
-#define I40E_ITR_RX_DEF I40E_ITR_8K
-#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF I40E_ITR_20K
+#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -44,6 +47,15 @@
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K 125 /* 8000 ints/sec */
+#define I40E_INTRL_62K 16 /* 62500 ints/sec */
+#define I40E_INTRL_83K 12 /* 83333 ints/sec */
#define I40E_QUEUE_END_OF_LIST 0x7FF
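INTRL_USEC_TO_REG() above divides the requested gap by the register's 4 usec resolution and ORs in the enable bit; INTRL_REG_TO_USEC() reverses it. For instance, the 8000 ints/sec limit is a 125 usec gap, and I40E_INTRL_8K (125) encodes as (125 >> 2) | INTRL_ENA = 0x5F; the shift truncates to 124 usec, so the limit is approximate. A hedged userspace sanity check, with the macros restated as plain arithmetic:

/* Hedged, userspace-style check of the conversions above; the macros
 * are copied here as plain arithmetic for illustration only.
 */
#include <assert.h>
#define BIT(n)			(1U << (n))
#define INTRL_ENA		BIT(6)
#define INTRL_REG_TO_USEC(r)	(((r) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(s)	((s) ? ((s) >> 2) | INTRL_ENA : 0)

int main(void)
{
	assert(INTRL_USEC_TO_REG(125) == 0x5F);	/* I40E_INTRL_8K */
	/* the >> 2 truncates: 125 usec round-trips to 124 usec */
	assert(INTRL_REG_TO_USEC(INTRL_USEC_TO_REG(125)) == 124);
	assert(INTRL_USEC_TO_REG(0) == 0);	/* zero disables */
	return 0;
}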
@@ -66,17 +78,29 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
@@ -129,17 +153,17 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN BIT(2)
+#define I40E_TX_FLAGS_TSO BIT(3)
+#define I40E_TX_FLAGS_IPV4 BIT(4)
+#define I40E_TX_FLAGS_IPV6 BIT(5)
+#define I40E_TX_FLAGS_FCCRC BIT(6)
+#define I40E_TX_FLAGS_FSO BIT(7)
+#define I40E_TX_FLAGS_TSYN BIT(8)
+#define I40E_TX_FLAGS_FD_SB BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -153,6 +177,7 @@ struct i40e_tx_buffer {
};
unsigned int bytecount;
unsigned short gso_segs;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
@@ -176,6 +201,7 @@ struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
+ u64 tx_linearize;
};
struct i40e_rx_queue_stats {
@@ -187,8 +213,6 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_TX_DETECT_HANG,
- __I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -199,12 +223,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
- test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
- set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
- clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
@@ -252,6 +270,12 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u8 packet_stride;
+
+ u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */
struct i40e_queue_stats stats;
@@ -274,6 +298,7 @@ enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2,
+ I40E_ULTRA_LATENCY = 3,
};
struct i40e_ring_container {
@@ -310,4 +335,20 @@ int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags);
#endif
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
#endif /* _I40E_TXRX_H_ */
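With i40e_get_head() now exposed here, callers such as i40e_get_tx_pending() in i40e_txrx.c compare the written-back head against the tail with wraparound. A hedged sketch of that arithmetic:

#include <stdint.h>

/* Hedged sketch of the head/tail wraparound arithmetic that
 * i40e_get_tx_pending() applies to the value returned here.
 */
static uint32_t pending_descriptors(uint32_t head, uint32_t tail,
				    uint32_t count)
{
	if (head == tail)
		return 0;			/* ring fully cleaned */

	return (head < tail) ? tail - head
			     : tail + count - head;
}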
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 9a5a75b1e2bc..dd2da356d9a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -33,24 +33,7 @@
#include "i40e_adminq.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_QEMU 0x1574
-#define I40E_DEV_ID_KX_A 0x157F
-#define I40E_DEV_ID_KX_B 0x1580
-#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_QSFP_A 0x1583
-#define I40E_DEV_ID_QSFP_B 0x1584
-#define I40E_DEV_ID_QSFP_C 0x1585
-#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_20G_KR2 0x1587
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
-
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
@@ -120,6 +103,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -151,14 +136,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
- I40E_VSI_MAIN = 0,
- I40E_VSI_VMDQ1,
- I40E_VSI_VMDQ2,
- I40E_VSI_CTRL,
- I40E_VSI_FCOE,
- I40E_VSI_MIRROR,
- I40E_VSI_SRIOV,
- I40E_VSI_FDIR,
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
@@ -182,16 +167,65 @@ struct i40e_link_status {
bool crc_enable;
u8 pacing;
u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+ BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
- u32 autoneg_advertised;
- u32 phy_id;
- u32 module_type;
bool get_link_info;
enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ enum i40e_aq_capabilities_phy_type phy_types;
};
#define I40E_HW_CAP_MAX_GPIO 30
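The module_type[] bytes above come straight from the module's identification data: byte 0 is the identifier, byte 1 the 10/40G compliance bitmap, byte 2 the 1G bitmap. A hedged helper showing how a caller might test for an SFP+ SR module; the helper name is made up, struct i40e_link_status is the one defined above:

/* Hedged illustration; the helper name is hypothetical. */
static bool is_sfp_10g_sr(const struct i40e_link_status *link)
{
	return link->module_type[0] == I40E_MODULE_TYPE_SFP &&
	       (link->module_type[1] & I40E_MODULE_TYPE_10G_BASE_SR);
}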
@@ -213,7 +247,17 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
- bool mfp_mode_1;
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -270,6 +314,7 @@ struct i40e_nvm_info {
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
u16 version; /* NVM package version */
u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
};
/* definitions used in NVM update support */
@@ -288,12 +333,17 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_CSUM_CON,
I40E_NVMUPD_CSUM_SA,
I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
};
enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_INIT,
I40E_NVMUPD_STATE_READING,
- I40E_NVMUPD_STATE_WRITING
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -312,6 +362,7 @@ enum i40e_nvmupd_state {
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -392,6 +443,8 @@ struct i40e_fc_info {
#define I40E_APP_PROTOID_FIP 0x8914
#define I40E_APP_SEL_ETHTYPE 0x1
#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
/* CEE or IEEE 802.1Qaz ETS Configuration data */
struct i40e_dcb_ets_config {
@@ -422,7 +475,10 @@ struct i40e_dcbx_config {
u8 dcbx_mode;
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
struct i40e_dcb_ets_config etscfg;
struct i40e_dcb_ets_config etsrec;
struct i40e_dcb_pfc_config pfc;
@@ -474,6 +530,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -482,16 +540,22 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
- struct i40e_dcbx_config local_dcbx_config;
- struct i40e_dcbx_config remote_dcbx_config;
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+ u64 flags;
/* debug mask */
u32 debug_mask;
+ char err_str[16];
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -588,19 +652,23 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+	/* Note: For non-tunnel packets, INT_UDP_0 is the correct status
+	 * bit for the UDP header
+ */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -608,8 +676,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
@@ -743,8 +811,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
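These mask conversions lean on the kernel's BIT()/BIT_ULL() helpers; a minimal sketch of the standard <linux/bitops.h> definitions they assume, and how one of the new masks expands:

	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))

	/* e.g. BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) == (1ULL << 63) */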
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
@@ -920,12 +987,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -937,6 +1004,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
@@ -955,15 +1024,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1009,14 +1087,17 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1069,6 +1150,14 @@ struct i40e_eth_stats {
u64 tx_errors; /* tepc */
};
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
#ifdef I40E_FCOE
/* Statistics collected per function for FCoE */
struct i40e_fcoe_stats {
@@ -1134,6 +1223,8 @@ struct i40e_hw_port_stats {
u64 fd_atr_match;
u64 fd_sb_match;
u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
@@ -1146,6 +1237,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 2d20af290fbf..ae879826084b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -81,7 +81,6 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
- I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -110,7 +109,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
@@ -129,7 +130,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
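A minimal sketch of the two request shapes on the VF side, assuming a hypothetical send_to_pf() helper (the offload flag names are the defines added just below):

	/* v1.0: no payload; v1.1: a u32 bitmap of driver capabilities */
	u32 vf_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
		      I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
		      I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	send_to_pf(I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		   (u8 *)&vf_caps, sizeof(vf_caps));	/* send_to_pf is illustrative */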
@@ -143,9 +145,14 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 23f95cdbdfcc..44462b40f2d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
**/
static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
}
/**
@@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
}
tempmap = vecmap->rxq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- linklistmap |= (1 <<
- (I40E_VIRTCHNL_SUPPORTED_QTYPES *
- vsi_queue_id));
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id));
}
tempmap = vecmap->txq_map;
for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- linklistmap |= (1 <<
- (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
- + 1));
+ linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id + 1));
}
next_q = find_first_bit(&linklistmap,
@@ -337,11 +330,23 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
reg = (vector_id) |
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
wr32(hw, reg_idx, reg);
}
+ /* If the VF is running in polling mode and using interrupt zero,
+ * auto-masking must be disabled when enabling interrupt zero for VFs.
+ */
+ if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+ (vector_id == 0)) {
+ reg = rd32(hw, I40E_GLINT_CTL);
+ if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
+ reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+ wr32(hw, I40E_GLINT_CTL, reg);
+ }
+ }
+
irq_list_done:
i40e_flush(hw);
}
@@ -531,6 +536,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
}
if (type == I40E_VSI_SRIOV) {
u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
@@ -541,20 +547,25 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
*/
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- vf->port_vlan_id, true, false);
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
+ true, false);
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF MAC addr\n");
- f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ f = i40e_add_filter(vsi, brdcast,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
if (!f)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
}
/* program mac filter */
- ret = i40e_sync_vsi_filters(vsi);
+ ret = i40e_sync_vsi_filters(vsi, false);
if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
@@ -598,6 +609,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/* map PF queues to VF queues */
for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
+
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
total_queue_pairs++;
@@ -694,6 +706,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
*/
vf->num_queue_pairs = 0;
vf->vf_states = 0;
+ clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
}
/**
@@ -832,10 +845,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
- i40e_alloc_vf_res(vf);
- i40e_enable_vf_mappings(vf);
- set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-
+ if (!i40e_alloc_vf_res(vf)) {
+ i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+ clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw);
@@ -864,6 +878,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
false);
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ false);
+
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
@@ -899,7 +918,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
for (vf_id = 0; vf_id < tmp; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
}
clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -925,8 +944,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to enable SR-IOV, error %d.\n", ret);
+ pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
pf->num_alloc_vfs = 0;
goto err_iov;
}
@@ -951,8 +969,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* VF resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
- /* enable VF vplan_qtable mappings */
- i40e_enable_vf_mappings(&vfs[i]);
}
pf->num_alloc_vfs = num_alloc_vfs;
@@ -980,24 +996,26 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
- if (pf->state & __I40E_TESTING) {
+ if (test_bit(__I40E_TESTING, &pf->state)) {
dev_warn(&pdev->dev,
"Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
err = -EPERM;
goto err_out;
}
- dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
i40e_free_vfs(pf);
else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
goto out;
if (num_vfs > pf->num_req_vfs) {
+ dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+ num_vfs, pf->num_req_vfs);
err = -EPERM;
goto err_out;
}
+ dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
err = i40e_alloc_vfs(pf, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
@@ -1088,6 +1106,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
}
} else {
vf->num_valid_msgs++;
+ /* reset the invalid counter if a valid message is received */
+ vf->num_invalid_msgs = 0;
}
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
@@ -1123,12 +1143,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
*
* called from the VF to request the API version used by the PF
**/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_version_info info = {
I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
};
+ vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(vf))
+ info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
I40E_SUCCESS, (u8 *)&info,
sizeof(struct
@@ -1143,7 +1167,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
*
* called from the VF to request its resources
**/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
@@ -1167,11 +1191,27 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
len = 0;
goto err;
}
+ if (VF_IS_V11(vf))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ } else {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
@@ -1179,10 +1219,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
if (vf->lan_vsi_idx) {
vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
- vfres->vsi_res[i].num_queue_pairs =
- pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
- memcpy(vfres->vsi_res[i].default_mac_addr,
- vf->default_lan_addr.addr, ETH_ALEN);
+ vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+ /* VFs only use TC 0 */
+ vfres->vsi_res[i].qset_handle
+ = le16_to_cpu(vsi->info.qs_handle[0]);
+ ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+ vf->default_lan_addr.addr);
i++;
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1560,6 +1602,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ /* Lock once, because every function called inside the loop accesses
+ * the VSI's MAC filter list, which must be protected by the same lock.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
@@ -1578,12 +1625,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
}
}
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
- if (i40e_sync_vsi_filters(vsi))
+ if (i40e_sync_vsi_filters(vsi, false))
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
@@ -1628,13 +1677,15 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ spin_lock_bh(&vsi->mac_filter_list_lock);
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
i40e_del_filter(vsi, al->list[i].addr,
I40E_VLAN_ANY, true, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
/* program the updated filter list */
- if (i40e_sync_vsi_filters(vsi))
+ if (i40e_sync_vsi_filters(vsi, false))
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
@@ -1686,6 +1737,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
/* add new VLAN filter */
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
if (ret)
dev_err(&pf->pdev->dev,
"Unable to add VF vlan filter %d, error %d\n",
@@ -1737,6 +1789,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+
if (ret)
dev_err(&pf->pdev->dev,
"Unable to delete VF vlan filter %d, error %d\n",
@@ -1773,9 +1826,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
valid_len = 0;
break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(vf))
+ valid_len = sizeof(u32);
+ else
+ valid_len = 0;
+ break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
break;
@@ -1843,7 +1901,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
return -EPERM;
- break;
}
/* few more checks */
if ((valid_len != msglen) || (err_msg_format)) {
@@ -1888,10 +1945,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
- ret = i40e_vc_get_version_msg(vf);
+ ret = i40e_vc_get_version_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- ret = i40e_vc_get_vf_resources_msg(vf);
+ ret = i40e_vc_get_vf_resources_msg(vf, msg);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
i40e_vc_reset_vf_msg(vf);
@@ -1969,9 +2026,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
- if (reg & (1 << bit_idx)) {
+ if (reg & BIT(bit_idx)) {
/* clear the bit in GLGEN_VFLRSTAT */
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
if (!test_bit(__I40E_DOWN, &pf->state))
i40e_reset_vf(vf, true);
@@ -2022,8 +2079,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
+ /* Lock once, because the add/del_filter calls below require
+ * mac_filter_list_lock to be held
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ i40e_del_filter(vsi, vf->default_lan_addr.addr,
+ vf->port_vlan_id ? vf->port_vlan_id : -1,
true, false);
/* Delete all the filters for this VSI - we're going to kill it
@@ -2032,9 +2095,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
- if (i40e_sync_vsi_filters(vsi)) {
+ if (i40e_sync_vsi_filters(vsi, false)) {
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
ret = -EIO;
goto error_param;
@@ -2061,8 +2126,10 @@ error_param:
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos)
{
+ u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ bool is_vsi_in_vlan = false;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
int ret = 0;
@@ -2088,7 +2155,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
+ if (le16_to_cpu(vsi->info.pvid) == vlanprio)
+ /* duplicate request, so just return success */
+ goto error_pvid;
+
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+ if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
@@ -2108,7 +2183,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
* MAC addresses deleted.
*/
if ((!(vlan_id || qos) ||
- (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
vsi->info.pvid)
ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
@@ -2123,8 +2198,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
}
}
if (vlan_id || qos)
- ret = i40e_vsi_add_pvid(vsi,
- vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+ ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
i40e_vsi_remove_pvid(vsi);
@@ -2277,7 +2351,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
- memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
+ ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
ivi->max_tx_rate = vf->tx_rate;
ivi->min_tx_rate = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 09043c1aae54..da44995def42 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -29,8 +29,6 @@
#include "i40e.h"
-#define I40E_MAX_MACVLAN_FILTERS 256
-#define I40E_MAX_VLAN_FILTERS 256
#define I40E_MAX_VLANID 4095
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
@@ -42,6 +40,9 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0x7000
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +76,8 @@ struct i40e_vf {
u16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
+ struct i40e_virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
/* VF Port Extender (PE) stag if used */
u16 stag;
@@ -93,7 +96,8 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u64 num_mdd_events; /* num of mdd events detected */
- u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
+ /* num of consecutive malformed or invalid msgs detected */
+ u64 num_invalid_msgs;
u64 num_valid_msgs; /* num of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index c1d25f8c1abc..fd123ca60761 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -60,17 +60,6 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
hw->aq.arq.len = I40E_VF_ARQLEN1;
hw->aq.arq.bal = I40E_VF_ARQBAL1;
hw->aq.arq.bah = I40E_VF_ARQBAH1;
- } else {
- hw->aq.asq.tail = I40E_PF_ATQT;
- hw->aq.asq.head = I40E_PF_ATQH;
- hw->aq.asq.len = I40E_PF_ATQLEN;
- hw->aq.asq.bal = I40E_PF_ATQBAL;
- hw->aq.asq.bah = I40E_PF_ATQBAH;
- hw->aq.arq.tail = I40E_PF_ARQT;
- hw->aq.arq.head = I40E_PF_ARQH;
- hw->aq.arq.len = I40E_PF_ARQLEN;
- hw->aq.arq.bal = I40E_PF_ARQBAL;
- hw->aq.arq.bah = I40E_PF_ARQBAH;
}
}
@@ -308,7 +297,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
/* set starting point */
wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
- I40E_PF_ATQLEN_ATQENABLE_MASK));
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
@@ -337,7 +326,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
/* set starting point */
wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
- I40E_PF_ARQLEN_ARQENABLE_MASK));
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
@@ -384,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
- hw->aq.asq.count = hw->aq.num_asq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -402,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
init_adminq_free_rings:
@@ -443,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
- hw->aq.arq.count = hw->aq.num_arq_entries;
/* allocate the ring memory */
ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -461,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
init_adminq_free_rings:
@@ -480,8 +469,12 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.asq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
@@ -490,16 +483,13 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.asq_mutex);
-
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_asq_bufs(hw);
+shutdown_asq_out:
mutex_unlock(&hw->aq.asq_mutex);
-
return ret_code;
}
@@ -513,8 +503,12 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
- if (hw->aq.arq.count == 0)
- return I40E_ERR_NOT_READY;
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
@@ -523,16 +517,13 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
- /* make sure lock is available */
- mutex_lock(&hw->aq.arq_mutex);
-
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers */
i40e_free_arq_bufs(hw);
+shutdown_arq_out:
mutex_unlock(&hw->aq.arq_mutex);
-
return ret_code;
}
@@ -607,6 +598,9 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
/* destroy the locks */
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
return ret_code;
}
@@ -628,8 +622,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "%s: ntc %d head %d.\n", __func__, ntc,
- rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
@@ -693,19 +686,23 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
u16 retval = 0;
u32 val = 0;
- val = rd32(hw, hw->aq.asq.head);
- if (val >= hw->aq.num_asq_entries) {
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: head overrun at %d\n", val);
+ "AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
- if (hw->aq.asq.count == 0) {
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "AQTX: Admin queue not initialized.\n");
+ "AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_exit;
+ goto asq_send_command_error;
}
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -730,8 +727,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
desc->flags &= ~cpu_to_le16(details->flags_dis);
desc->flags |= cpu_to_le16(details->flags_ena);
- mutex_lock(&hw->aq.asq_mutex);
-
if (buff_size > hw->aq.asq_buf_size) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
@@ -841,6 +836,10 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
buff_size);
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ *details->wb_desc = *desc_on_ring;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -852,7 +851,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
asq_send_command_error:
mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
return status;
}
@@ -898,8 +896,15 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -959,6 +964,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index ef43d68f67b3..a3eae5d9a2bd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -69,6 +69,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
+ struct i40e_aq_desc *wb_desc;
};
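A sketch of how a caller might use the new wb_desc hook, assuming desc and buff have been prepared as usual; i40evf_asq_send_command() copies the on-ring descriptor back into wb_desc once the write-back completes:

	struct i40e_aq_desc wb_desc;
	struct i40e_asq_cmd_details details = {};

	details.wb_desc = &wb_desc;
	status = i40evf_asq_send_command(hw, &desc, buff, buff_size, &details);
	/* on success, wb_desc holds the firmware-written descriptor fields */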
#define I40E_ADMINQ_DETAILS(R, i) \
@@ -108,9 +109,10 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code, which can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
**/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
int aq_to_posix[] = {
0, /* I40E_AQ_RC_OK */
@@ -142,8 +144,9 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
- if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+ if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
+
return aq_to_posix[aq_rc];
}
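Typical use pairs the driver status with the last firmware return code, e.g.:

	int err = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);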
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e715bccfb5d2..fcb9ef34cc7a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -34,8 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
-#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0004
struct i40e_aq_desc {
__le16 flags;
@@ -133,12 +132,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -260,7 +254,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -272,8 +269,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -507,7 +502,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -530,7 +526,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -824,8 +822,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1066,6 +1068,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -1716,11 +1719,13 @@ struct i40e_aqc_get_link_status {
u8 phy_type; /* i40e_aq_phy_type */
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
#define I40E_AQ_LINK_FAULT 0x02
#define I40E_AQ_LINK_FAULT_TX 0x04
#define I40E_AQ_LINK_FAULT_RX 0x08
#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
#define I40E_AQ_MEDIA_AVAILABLE 0x40
#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
@@ -2093,6 +2098,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure_A0 {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 39fcb1dc4ea6..72b1942a94aa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -51,9 +51,20 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -72,6 +83,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
}
/**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
* i40evf_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -114,25 +331,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
len = buf_len;
/* write the full 16-byte chunks */
for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask,
- "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, buf[i], buf[i + 1], buf[i + 2],
- buf[i + 3], buf[i + 4], buf[i + 5],
- buf[i + 6], buf[i + 7], buf[i + 8],
- buf[i + 9], buf[i + 10], buf[i + 11],
- buf[i + 12], buf[i + 13], buf[i + 14],
- buf[i + 15]);
+ i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
/* write whatever's left over without overrunning the buffer */
- if (i < len) {
- char d_buf[80];
- int j = 0;
-
- memset(d_buf, 0, sizeof(d_buf));
- j += sprintf(d_buf, "\t0x%04X ", i);
- while (i < len)
- j += sprintf(&d_buf[j], " %02X", buf[i++]);
- i40e_debug(hw, mask, "%s\n", d_buf);
- }
+ if (i < len)
+ i40e_debug(hw, mask, "\t0x%04X %*ph\n",
+ i, len - i, buf + i);
}
}
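The %ph and %*ph specifiers are kernel printk extensions that hex-dump a small buffer (at most 64 bytes); an illustrative use:

	u8 data[4] = { 0xde, 0xad, 0xbe, 0xef };

	pr_debug("fixed:    %4ph\n", data);	/* prints "de ad be ef" */
	pr_debug("variable: %*ph\n", 2, data);	/* prints "de ad" */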
@@ -146,7 +349,7 @@ bool i40evf_check_asq_alive(struct i40e_hw *hw)
{
if (hw->aq.asq.len)
return !!(rd32(hw, hw->aq.asq.len) &
- I40E_PF_ATQLEN_ATQENABLE_MASK);
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
else
return false;
}
@@ -177,6 +380,164 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
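A minimal sketch of programming a per-VSI key with the new helper, assuming hw and vsi_id come from the caller; netdev_rss_key_fill() is the stock kernel helper that fills a buffer from the host's random RSS key:

	struct i40e_aqc_get_set_rss_key_data key_data = {};

	netdev_rss_key_fill(key_data.standard_rss_key,
			    sizeof(key_data.standard_rss_key));
	status = i40evf_aq_set_rss_key(hw, vsi_id, &key_data);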
/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
@@ -612,10 +973,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
- memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
- ETH_ALEN);
- memcpy(hw->mac.addr, vsi_res->default_mac_addr,
- ETH_ALEN);
+ ether_addr_copy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr);
+ ether_addr_copy(hw->mac.addr,
+ vsi_res->default_mac_addr);
}
vsi_res++;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
new file mode 100644
index 000000000000..e6a39c9862e8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 931c88044300..00ed24bfce13 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 21a91b14bf81..5e314fd3c016 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -34,7 +34,7 @@
#include <linux/pci.h>
/* get readq/writeq support for 32 bit kernels, use the low-first version */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
/* File to be the magic between shared code and
* actual OS primitives
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 58e37a44b80a..cbd9a1b078ab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,6 +60,19 @@ void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
@@ -88,4 +101,6 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ u16 vsi_seid);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 3cc737629bf7..10febcfd7cd8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -27,1580 +27,6 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
-#define I40E_GL_ARQH_ARQH_SHIFT 0
-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
-#define I40E_GL_ARQT_ARQT_SHIFT 0
-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
-#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
-#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
-#define I40E_GL_ATQH_ATQH_SHIFT 0
-#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
-#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
-#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
-#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
-#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
-#define I40E_GL_ATQT_ATQT_SHIFT 0
-#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
-#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
-#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
-#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
-#define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
-#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
-#define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
-#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
-#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
-#define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
-#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
-#define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAH_MAX_INDEX 127
-#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAL_MAX_INDEX 127
-#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQH_MAX_INDEX 127
-#define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQLEN_MAX_INDEX 127
-#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQT_MAX_INDEX 127
-#define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAH_MAX_INDEX 127
-#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAL_MAX_INDEX 127
-#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQH_MAX_INDEX 127
-#define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQLEN_MAX_INDEX 127
-#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQT_MAX_INDEX 127
-#define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
-#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
-#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
-#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
-#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
-#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
-#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
-#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
-#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
-#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
-#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
-#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
-#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
-#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
-#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
-#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
-#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
-#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
-#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
-#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
-#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
-#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
-#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
-#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
-#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
-#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
-#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
-#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
-#define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
-#define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
-#define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
-#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
-#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
-#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
-#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
-#define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
-#define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSCA_MAX_INDEX 3
-#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
-#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
-#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
-#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
-#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSRWD_MAX_INDEX 3
-#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
-#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
-#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
-#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
-#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
-#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
-#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
-#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
-#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
-#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
-#define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
-#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
-#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
-#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
-#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
-#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
-#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
-#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
-#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
-#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
-#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
-#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
-#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
-#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
-#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
-#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
-#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
-#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
-#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
-#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
-#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
-#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_SDPART_MAX_INDEX 15
-#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
-#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
-#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
-#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
-#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_PFINT_CEQCTL_MAX_INDEX 511
-#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
-#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
-#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
-#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
-#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
-#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_RATEN_MAX_INDEX 511
-#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
-#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_RQCTL_MAX_INDEX 1535
-#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_TQCTL_MAX_INDEX 1535
-#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
-#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_MAX_INDEX 127
-#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_ITR0_MAX_INDEX 2
-#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN_MAX_INDEX 2
-#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPINT_AEQCTL_MAX_INDEX 127
-#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_VPINT_CEQCTL_MAX_INDEX 511
-#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLST0_MAX_INDEX 127
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_RATE0_MAX_INDEX 127
-#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
-#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_RATEN_MAX_INDEX 511
-#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
-#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
-#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
-#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
-#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
-#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QRX_ENA_MAX_INDEX 1535
-#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
-#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QRX_TAIL_MAX_INDEX 1535
-#define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_CTL_MAX_INDEX 1535
-#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
-#define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
-#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_ENA_MAX_INDEX 1535
-#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
-#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_HEAD_MAX_INDEX 1535
-#define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
-#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_TAIL_MAX_INDEX 1535
-#define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_MAPENA_MAX_INDEX 127
-#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QTABLE_MAX_INDEX 15
-#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QBASE_MAX_INDEX 383
-#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QTABLE_MAX_INDEX 7
-#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
-#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
-#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
-#define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
-#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
-#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
-#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
-#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
-#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
-#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_METF_MAX_INDEX 3
-#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
-#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
-#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
-#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
-#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
-#define I40E_MSIX_PBA_MAX_INDEX 5
-#define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TADD_MAX_INDEX 128
-#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TMSG_MAX_INDEX 128
-#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TUADD_MAX_INDEX 128
-#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TVCTRL_MAX_INDEX 128
-#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
#define I40E_VFMSIX_PBA1_MAX_INDEX 19
#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
@@ -1623,1525 +49,6 @@
#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
-#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
-#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
-#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
-#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
-#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
-#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
-#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
-#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
-#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
-#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
-#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
-#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
-#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
-#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
-#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
-#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
-#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
-#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
-#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
-#define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
-#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
-#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
-#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
-#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
-#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
-#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
-#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
-#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64L_MAX_INDEX 3
-#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RDPC_MAX_INDEX 3
-#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RFC_MAX_INDEX 3
-#define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RJC_MAX_INDEX 3
-#define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RLEC_MAX_INDEX 3
-#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ROC_MAX_INDEX 3
-#define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUC_MAX_INDEX 3
-#define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUPP_MAX_INDEX 3
-#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDOLD_MAX_INDEX 3
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCH_MAX_INDEX 3
-#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCL_MAX_INDEX 3
-#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCH_MAX_INDEX 3
-#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCL_MAX_INDEX 3
-#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCH_MAX_INDEX 15
-#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCL_MAX_INDEX 15
-#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCH_MAX_INDEX 15
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCL_MAX_INDEX 15
-#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCH_MAX_INDEX 15
-#define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCL_MAX_INDEX 15
-#define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCH_MAX_INDEX 15
-#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCL_MAX_INDEX 15
-#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCH_MAX_INDEX 15
-#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCL_MAX_INDEX 15
-#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCH_MAX_INDEX 15
-#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCL_MAX_INDEX 15
-#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_RUPP_MAX_INDEX 15
-#define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_TDPC_MAX_INDEX 15
-#define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCH_MAX_INDEX 15
-#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCL_MAX_INDEX 15
-#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCH_MAX_INDEX 15
-#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCL_MAX_INDEX 15
-#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCH_MAX_INDEX 383
-#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCL_MAX_INDEX 383
-#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCH_MAX_INDEX 383
-#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCL_MAX_INDEX 383
-#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCH_MAX_INDEX 383
-#define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCL_MAX_INDEX 383
-#define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCH_MAX_INDEX 383
-#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCL_MAX_INDEX 383
-#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCH_MAX_INDEX 383
-#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCL_MAX_INDEX 383
-#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCH_MAX_INDEX 383
-#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCL_MAX_INDEX 383
-#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RDPC_MAX_INDEX 383
-#define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RUPP_MAX_INDEX 383
-#define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_TEPC_MAX_INDEX 383
-#define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCH_MAX_INDEX 383
-#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCL_MAX_INDEX 383
-#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCH_MAX_INDEX 383
-#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCL_MAX_INDEX 383
-#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
-#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
-#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
-#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
-#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
-#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
-#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
-#define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
-#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
-#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
-#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
-#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 25
-#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
-#define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
-#define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
-#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_RX_MAX_INDEX 127
-#define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_TX_MAX_INDEX 127
-#define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
-#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
-#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
-#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
-#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
-#define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
-#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
-#define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
-#define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
-#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
-#define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
-#define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
-#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
-#define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
-#define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
-#define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
-#define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
-#define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
-#define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
-#define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
-#define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
-#define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
-#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
-#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
-#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAH_MAX_INDEX 3
-#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
-#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
-#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
-#define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAL_MAX_INDEX 3
-#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
@@ -3366,4 +273,64 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
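
Every register in this header follows one convention: a base address (optionally indexed), a field SHIFT, and a field MASK built with I40E_MASK(). A minimal sketch of how a caller decodes one of the fields added above — assuming I40E_MASK() expands to ((mask) << (shift)) as elsewhere in this header, and using the driver's rd32() register-read helper; the function name itself is hypothetical:

	static u16 example_cqp_minor_code(struct i40e_hw *hw)
	{
		/* read the CQP error-code register added in this patch
		 * and pull out its 16-bit minor-code field with the
		 * matching mask/shift pair
		 */
		u32 reg = rd32(hw, I40E_VFPE_CQPERRCODES1);

		return (u16)((reg & I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK) >>
			     I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT);
	}
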
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 395f32f226c0..47e9a90d6b10 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -140,65 +140,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
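
Context for the deletions that follow: the XL710 exposes no readable Tx head register, so the hardware DMA-writes the consumed head index into the slot one past the last descriptor, and i40e_get_head() above simply dereferences that slot. Its first line, unchanged by this patch and shown here only for reference, reads:

	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
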
-/**
- * i40e_get_tx_pending - how many tx descriptors not processed
- * @tx_ring: the ring of descriptors
- *
- * Since there is no access to the ring head register
- * in XL710, we need to use our local copies
- **/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
-{
- u32 head, tail;
-
- head = i40e_get_head(ring);
- tail = readl(ring->tail);
-
- if (head != tail)
- return (head < tail) ?
- tail - head : (tail + ring->count - head);
-
- return 0;
-}
-
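
The wrap-around arithmetic in the helper being deleted, traced with hypothetical values on a 512-entry ring:

	u32 count = 512, head = 500, tail = 10;	   /* hypothetical snapshot */
	u32 pending = (head < tail) ? tail - head	   /* no wrap      */
				    : tail + count - head; /* wrapped: 22  */
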
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
- u32 tx_done = tx_ring->stats.packets;
- u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
- u32 tx_pending = i40e_get_tx_pending(tx_ring);
- bool ret = false;
-
- clear_check_for_tx_hang(tx_ring);
-
- /* Check for a hung queue, but be thorough. This verifies
- * that a transmit has been completed since the previous
- * check AND there is at least one packet pending. The
- * ARMED bit is set to indicate a potential hang. The
- * bit is cleared if a pause frame is received to remove
- * false hang detection due to PFC or 802.3x frames. By
- * requiring this to fail twice we avoid races with
- * PFC clearing the ARMED bit and conditions where we
- * run the check_tx_hang logic with a transmit completion
- * pending but without time to complete it yet.
- */
- if ((tx_done_old == tx_done) && tx_pending) {
- /* make sure it is true for two checks in a row */
- ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
- &tx_ring->state);
- } else if (tx_done_old == tx_done &&
- (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
- /* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_done;
- clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
- }
-
- return ret;
-}
-
#define WB_STRIDE 0x3
/**
@@ -304,36 +245,15 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ /* Check whether any non-cache-aligned descriptors are still
+ * waiting to be written back, and if so kick the hardware to
+ * force the write-back while NAPI polling.
+ */
if (budget &&
!((i & WB_STRIDE) == WB_STRIDE) &&
!test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
- else
- tx_ring->arm_wb = false;
-
- if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
- /* schedule immediate reset if we believe we hung */
- dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
- " VSI <%d>\n"
- " Tx Queue <%d>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n",
- tx_ring->vsi->seid,
- tx_ring->queue_index,
- tx_ring->next_to_use, i);
-
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
- dev_info(tx_ring->dev,
- "tx hang detected on queue %d, resetting adapter\n",
- tx_ring->queue_index);
-
- tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
-
- /* the adapter is about to reset, no point in enabling stuff */
- return true;
- }
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
@@ -355,32 +275,51 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
}
- return budget > 0;
+ return !!budget;
}
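
The arm_wb stride test earlier in this function keys off WB_STRIDE (0x3): with i the descriptor index where cleaning stopped, (i & WB_STRIDE) == WB_STRIDE holds only when the low two bits are set, i.e. when cleaning ended right at a 4-descriptor boundary where the hardware coalesces a write-back anyway. Traced with hypothetical indices:

	/* i == 7: 7 & 0x3 == 0x3 -> aligned stop, no forced write-back */
	/* i == 5: 5 & 0x3 == 0x1 -> stragglers possible, set arm_wb    */
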
/**
- * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors
+ * i40evf_force_wb - Arm hardware to do a wb on non-cache aligned descriptors
* @vsi: the VSI we care about
* @q_vector: the vector on which to force writeback
*
**/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
- I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
- /* allow 00 to be written to the index */
-
- wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
- val);
+ u16 flags = q_vector->tx.ring[0].flags;
+
+ if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ u32 val;
+
+ if (q_vector->arm_wb_state)
+ return;
+
+ val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+ vsi->base_vector - 1),
+ val);
+ q_vector->arm_wb_state = true;
+ } else {
+ u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ }
}
/**
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
+ * Returns true if ITR changed, false if not
+ *
* Stores a new ITR value based on packets and byte counts during
* the last interrupt. The advantage of per interrupt computation
* is faster updates and more accurate ITR for the current traffic
@@ -389,22 +328,33 @@ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
* testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
+ struct i40e_q_vector *qv = rc->ring->q_vector;
u32 new_itr = rc->itr;
int bytes_per_int;
+ int usecs;
if (rc->total_packets == 0 || !rc->itr)
- return;
+ return false;
/* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
+ * 0-10MB/s lowest (50000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (18000 ints/s)
+ * > 40000 Rx packets per second (8000 ints/s)
+ *
+ * The math works out because the divisor is in 10^(-6), which
+ * turns the bytes/us input value into MB/s values.  Be sure to
+ * use usecs, since the ITR register values are written in 2 usec
+ * increments, and to use the smoothed values that the countdown
+ * timer gives us.
*/
- bytes_per_int = rc->total_bytes / rc->itr;
- switch (rc->itr) {
+ usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+ bytes_per_int = rc->total_bytes / usecs;
+
+ switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
if (bytes_per_int > 10)
new_latency_range = I40E_LOW_LATENCY;
@@ -416,61 +366,55 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
+ case I40E_ULTRA_LATENCY:
+ default:
if (bytes_per_int <= 20)
- rc->latency_range = I40E_LOW_LATENCY;
+ new_latency_range = I40E_LOW_LATENCY;
break;
}
+ /* this is to adjust RX more aggressively when streaming small
+ * packets. The value of 40000 was picked as it is just beyond
+ * what the hardware can receive per second if in low latency
+ * mode.
+ */
+#define RX_ULTRA_PACKET_RATE 40000
+
+ if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+ (&qv->rx == rc))
+ new_latency_range = I40E_ULTRA_LATENCY;
+
+ rc->latency_range = new_latency_range;
+
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_100K;
+ new_itr = I40E_ITR_50K;
break;
case I40E_LOW_LATENCY:
new_itr = I40E_ITR_20K;
break;
case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_18K;
+ break;
+ case I40E_ULTRA_LATENCY:
new_itr = I40E_ITR_8K;
break;
default:
break;
}
- if (new_itr != rc->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * rc->itr) /
- ((9 * new_itr) + rc->itr);
- rc->itr = new_itr & I40E_MAX_ITR;
- }
-
rc->total_bytes = 0;
rc->total_packets = 0;
-}
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
- u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
- struct i40e_hw *hw = &q_vector->vsi->back->hw;
- u32 reg_addr;
- u16 old_itr;
-
- reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
- old_itr = q_vector->rx.itr;
- i40e_set_new_dynamic_itr(&q_vector->rx);
- if (old_itr != q_vector->rx.itr)
- wr32(hw, reg_addr, q_vector->rx.itr);
-
- reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
- old_itr = q_vector->tx.itr;
- i40e_set_new_dynamic_itr(&q_vector->tx);
- if (old_itr != q_vector->tx.itr)
- wr32(hw, reg_addr, q_vector->tx.itr);
+ if (new_itr != rc->itr) {
+ rc->itr = new_itr;
+ return true;
+ }
+
+ return false;
}
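
A worked pass through the new usecs-based math, with hypothetical traffic numbers and assuming ITR_COUNTDOWN_START is 100 and I40E_ITR_20K is 25 register units (2 usec each), per the matching i40e_txrx.h:

	/* rc->itr = 25 (I40E_ITR_20K), rc->total_bytes = 75000 */
	usecs         = (25 << 1) * 100;   /* 5000 usec of smoothed time */
	bytes_per_int = 75000 / 5000;      /* 15, i.e. roughly 15 MB/s   */
	/* 10 < 15 <= 20 keeps I40E_LOW_LATENCY; the ITR stays at 20K
	 * ints/s, so the function returns false (no ITR change).
	 */
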
-/**
+/*
* i40evf_setup_tx_descriptors - Allocate the Tx descriptors
* @tx_ring: the tx ring to set up
*
@@ -828,16 +772,11 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag)
{
struct i40e_q_vector *q_vector = rx_ring->q_vector;
- struct i40e_vsi *vsi = rx_ring->vsi;
- u64 flags = vsi->back->flags;
if (vlan_tag & VLAN_VID_MASK)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (flags & I40E_FLAG_IN_NETPOLL)
- netif_rx(skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
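
The (1 << n) to BIT(n) conversions in the hunks below are behavior-preserving: BIT() is the <linux/bitops.h> macro

	#define BIT(nr) (1UL << (nr))

so, for example, BIT(I40E_RX_DESC_STATUS_DD_SHIFT) yields the same descriptor-done bit as the open-coded shift it replaces.
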
/**
@@ -873,7 +812,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return;
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
/* both known and outer_ip must be set for the below code to work */
@@ -888,25 +827,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
ipv6 = true;
if (ipv4 &&
- (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
- rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1003,7 +942,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- const int current_node = numa_node_id();
+ const int current_node = numa_mem_id();
struct i40e_vsi *vsi = rx_ring->vsi;
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
@@ -1027,7 +966,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1063,8 +1002,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1073,6 +1012,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
cleaned_count++;
if (rx_hbo || rx_sph) {
int len;
+
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
else
@@ -1116,7 +1056,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1066,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
continue;
}
@@ -1141,7 +1081,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
@@ -1202,7 +1142,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT;
- if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1220,7 +1160,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,17 +1178,14 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
- !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
rx_ring->rx_stats.non_eop_descs++;
continue;
}
/* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1262,7 +1199,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
- vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1280,6 +1217,94 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
return total_rx_packets;
}
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+ u32 val;
+
+ val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+ return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_VFINT_DYN_CTLN1
+
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
+{
+ struct i40e_hw *hw = &vsi->back->hw;
+ bool rx = false, tx = false;
+ u32 rxval, txval;
+ int vector;
+
+ vector = (q_vector->v_idx + vsi->base_vector);
+
+ /* avoid dynamic calculation if in countdown mode OR if
+ * all dynamic is disabled
+ */
+ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+ if (q_vector->itr_countdown > 0 ||
+ (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ goto enable_int;
+ }
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
+ }
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+ txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
+ }
+ if (rx || tx) {
+ /* get the higher of the two ITR adjustments and
+ * use the same value for both ITR registers
+ * when in adaptive mode (Rx and/or Tx)
+ */
+ u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+ q_vector->tx.itr = q_vector->rx.itr = itr;
+ txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+ tx = true;
+ rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+ rx = true;
+ }
+
+ /* only need to enable the interrupt once, but need
+ * to possibly update both ITR values
+ */
+ if (rx) {
+ /* set the INTENA_MSK_MASK so that this first write
+ * won't actually enable the interrupt, instead just
+ * updating the ITR (it's bit 31 PF and VF)
+ */
+ rxval |= BIT(31);
+ /* don't check _DOWN because interrupt isn't being enabled */
+ wr32(hw, INTREG(vector - 1), rxval);
+ }
+
+enable_int:
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ wr32(hw, INTREG(vector - 1), txval);
+
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
+ else
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
+}
+
/**
* i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
@@ -1298,7 +1323,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
- int cleaned;
+ int work_done = 0;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
@@ -1311,38 +1336,45 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
arm_wb |= ring->arm_wb;
+ ring->arm_wb = false;
}
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ goto tx_only;
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
+ int cleaned;
+
if (ring_is_ps_enabled(ring))
cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
else
cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+ work_done += cleaned;
/* if we didn't clean as many as budgeted, we must be done */
clean_complete &= (budget_per_ring != cleaned);
}
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+tx_only:
if (arm_wb)
- i40e_force_wb(vsi, q_vector);
+ i40evf_force_wb(vsi, q_vector);
return budget;
}
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete(napi);
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
- ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- i40e_update_dynamic_itr(q_vector);
-
- if (!test_bit(__I40E_DOWN, &vsi->state))
- i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
+ if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ q_vector->arm_wb_state = false;
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete_done(napi, work_done);
+ i40e_update_enable_itr(vsi, q_vector);
return 0;
}
@@ -1385,6 +1417,7 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
+
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
return -EINVAL;
@@ -1476,11 +1509,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ struct udphdr *oudph;
+ struct iphdr *oiph;
u32 l4_tunnel = 0;
if (skb->encapsulation) {
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
+ oudph = udp_hdr(skb);
+ oiph = ip_hdr(skb);
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
@@ -1519,6 +1556,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
+ if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+ (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
+ (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+ oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+ oiph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, 0);
+ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -1841,6 +1887,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index)))
writel(i, tx_ring->tail);
+ else
+ prefetchw(tx_desc + 1);
return;
@@ -1912,6 +1960,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso;
+
if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -1939,10 +1988,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
- if (i40e_chk_linearize(skb, tx_flags))
+ if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
-
+ tx_ring->tx_stats.tx_linearize++;
+ }
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
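
The Tx/Rx hot path above replaces the old post-poll i40e_update_dynamic_itr() with i40e_buildreg_itr() plus i40e_update_enable_itr(): one register write per direction carries the new interval, and the Rx write sets bit 31 (INTENA_MSK) so it updates the ITR without re-arming the vector, leaving the single enabling write to the Tx value. A standalone sketch of that register packing; the I40E_VFINT_DYN_CTLN1 field offsets below are restated from memory and should be treated as assumptions:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)              (1U << (n))
#define DYN_CTLN_INTENA     BIT(0)   /* enable the interrupt */
#define DYN_CTLN_CLEARPBA   BIT(1)   /* clear the pending-bit array */
#define DYN_CTLN_ITR_SHIFT  3        /* which ITR index to load (assumed) */
#define DYN_CTLN_INT_SHIFT  5        /* new interval, 2 usec units (assumed) */
#define DYN_CTLN_INTENA_MSK BIT(31)  /* mask off INTENA for this write */

/* userspace model of i40e_buildreg_itr() */
static uint32_t buildreg_itr(int type, uint16_t itr)
{
	return DYN_CTLN_INTENA | DYN_CTLN_CLEARPBA |
	       ((uint32_t)type << DYN_CTLN_ITR_SHIFT) |
	       ((uint32_t)itr << DYN_CTLN_INT_SHIFT);
}

int main(void)
{
	/* Rx write carries the new interval but must not re-arm yet */
	uint32_t rxval = buildreg_itr(0 /* RX_ITR, assumed index */, 0x19);
	rxval |= DYN_CTLN_INTENA_MSK;
	/* Tx write re-arms the vector and loads its interval */
	uint32_t txval = buildreg_itr(1 /* TX_ITR, assumed index */, 0x19);
	printf("rx=0x%08x tx=0x%08x\n", (unsigned)rxval, (unsigned)txval);
	return 0;
}
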
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e7a34f899f2c..ebc1bf77f036 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -32,11 +32,14 @@
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
+#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
+#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
-#define I40E_ITR_RX_DEF I40E_ITR_8K
-#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF I40E_ITR_20K
+#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -44,6 +47,15 @@
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K 125 /* 8000 ints/sec */
+#define I40E_INTRL_62K 16 /* 62500 ints/sec */
+#define I40E_INTRL_83K 12 /* 83333 ints/sec */
#define I40E_QUEUE_END_OF_LIST 0x7FF
@@ -66,17 +78,29 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
@@ -129,16 +153,16 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN BIT(2)
+#define I40E_TX_FLAGS_TSO BIT(3)
+#define I40E_TX_FLAGS_IPV4 BIT(4)
+#define I40E_TX_FLAGS_IPV6 BIT(5)
+#define I40E_TX_FLAGS_FCCRC BIT(6)
+#define I40E_TX_FLAGS_FSO BIT(7)
+#define I40E_TX_FLAGS_FD_SB BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -152,6 +176,7 @@ struct i40e_tx_buffer {
};
unsigned int bytecount;
unsigned short gso_segs;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
@@ -175,6 +200,7 @@ struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
+ u64 tx_linearize;
};
struct i40e_rx_queue_stats {
@@ -186,8 +212,6 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_TX_DETECT_HANG,
- __I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -198,12 +222,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
#define clear_ring_ps_enabled(ring) \
clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
- test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
- set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
- clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
@@ -250,6 +268,10 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
+ u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
+
/* stats structs */
struct i40e_queue_stats stats;
struct u64_stats_sync syncp;
@@ -271,6 +293,7 @@ enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
I40E_BULK_LATENCY = 2,
+ I40E_ULTRA_LATENCY = 3,
};
struct i40e_ring_container {
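
The new INTRL macros complement the 2 usec ITR encoding with a 4 usec rate-limit encoding gated by an enable bit. A small userspace round trip of those macros, copied verbatim from the header above with no register I/O:

#include <stdint.h>
#include <stdio.h>

#define INTRL_ENA             (1U << 6)
#define INTRL_REG_TO_USEC(r)  (((r) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(us) ((us) ? (((us) >> 2) | INTRL_ENA) : 0)

int main(void)
{
	uint32_t reg = INTRL_USEC_TO_REG(125);  /* I40E_INTRL_8K: ~8000 ints/sec */
	printf("reg=0x%02x usec back=%u\n",
	       (unsigned)reg, (unsigned)INTRL_REG_TO_USEC(reg));
	/* 125 >> 2 == 31, so the round trip yields 124 usec: the 4 usec
	 * granularity truncates, which is why exact limits want multiples
	 * of 4.
	 */
	return 0;
}
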
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c463ec41579c..301fe2b6dd03 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -33,24 +33,7 @@
#include "i40e_adminq.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_QEMU 0x1574
-#define I40E_DEV_ID_KX_A 0x157F
-#define I40E_DEV_ID_KX_B 0x1580
-#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_QSFP_A 0x1583
-#define I40E_DEV_ID_QSFP_B 0x1584
-#define I40E_DEV_ID_QSFP_C 0x1585
-#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_20G_KR2 0x1587
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
-
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
@@ -120,6 +103,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -151,14 +136,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
- I40E_VSI_MAIN = 0,
- I40E_VSI_VMDQ1,
- I40E_VSI_VMDQ2,
- I40E_VSI_CTRL,
- I40E_VSI_FCOE,
- I40E_VSI_MIRROR,
- I40E_VSI_SRIOV,
- I40E_VSI_FDIR,
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
@@ -182,16 +167,65 @@ struct i40e_link_status {
bool crc_enable;
u8 pacing;
u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+ BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
- u32 autoneg_advertised;
- u32 phy_id;
- u32 module_type;
bool get_link_info;
enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ enum i40e_aq_capabilities_phy_type phy_types;
};
#define I40E_HW_CAP_MAX_GPIO 30
@@ -213,7 +247,17 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
- bool mfp_mode_1;
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -269,6 +313,7 @@ struct i40e_nvm_info {
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
u16 version; /* NVM package version */
u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
};
/* definitions used in NVM update support */
@@ -287,12 +332,17 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_CSUM_CON,
I40E_NVMUPD_CSUM_SA,
I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
};
enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_INIT,
I40E_NVMUPD_STATE_READING,
- I40E_NVMUPD_STATE_WRITING
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -311,6 +361,7 @@ enum i40e_nvmupd_state {
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
@@ -417,6 +468,7 @@ struct i40e_ieee_app_priority_table {
struct i40e_dcbx_config {
u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
struct i40e_ieee_ets_config etscfg;
struct i40e_ieee_ets_recommend etsrec;
struct i40e_ieee_pfc_config pfc;
@@ -468,6 +520,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -476,16 +530,19 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
- struct i40e_dcbx_config local_dcbx_config;
- struct i40e_dcbx_config remote_dcbx_config;
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
/* debug mask */
u32 debug_mask;
+ char err_str[16];
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -582,19 +639,23 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ /* Note: For non-tunnel packets INT_UDP_0 is the right status for
+ * UDP header
+ */
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
<< I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -602,8 +663,8 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
@@ -737,8 +798,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
enum i40e_rx_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
@@ -914,12 +974,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -931,6 +991,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_filter_program_desc {
__le32 qindex_flex_ptype_vsi;
__le32 rsvd;
@@ -949,15 +1011,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+ * Value 29, 30, 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+ * Value 39, 40, 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1003,8 +1074,7 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1063,6 +1133,14 @@ struct i40e_eth_stats {
u64 tx_errors; /* tepc */
};
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+ u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+ u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1109,6 +1187,8 @@ struct i40e_hw_port_stats {
u64 fd_atr_match;
u64 fd_sb_match;
u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
@@ -1119,6 +1199,7 @@ struct i40e_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
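
The BIT()/BIT_ULL() conversions in this header are not purely cosmetic: PCTYPE shifts such as 41 exceed the width of a plain int, so the open-coded form had to cast every use to u64, and a bare (1 << n) would be undefined behavior. A compact illustration, with the macros restated for userspace:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)      (1U << (n))
#define BIT_ULL(n)  (1ULL << (n))

int main(void)
{
	/* ((u64)1 << 41) is fine; (1 << 41) would shift a 32-bit int and
	 * is undefined, which is exactly what BIT_ULL() guards against.
	 * 31 and 41 are the IPv4/IPv6 UDP PCTYPE values from the enum above.
	 */
	uint64_t hena = BIT_ULL(31) | BIT_ULL(41);
	printf("hena=%#llx\n", (unsigned long long)hena);
	return 0;
}
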
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 59f62f0e65dd..9f7b279b9d9c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,6 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
- I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -110,7 +109,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
@@ -129,7 +130,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
@@ -143,9 +145,14 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
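
With the minor version bumped to 1, GET_VF_RESOURCES becomes parameterized: a 1.1 VF sends a u32 bitmap of the offload flags it wants, while a 1.0 exchange still sends nothing. A sketch of assembling that bitmap from the flag values above; vf_caps() is a hypothetical helper for illustration, not driver code:

#include <stdint.h>
#include <stdio.h>

#define VF_OFFLOAD_L2          0x00000001
#define VF_OFFLOAD_RSS_AQ      0x00000008
#define VF_OFFLOAD_RSS_REG     0x00000010
#define VF_OFFLOAD_WB_ON_ITR   0x00000020
#define VF_OFFLOAD_VLAN        0x00010000
#define VF_OFFLOAD_RX_POLLING  0x00020000

/* hypothetical: build the capability bitmap a VF might advertise */
static uint32_t vf_caps(int pf_is_v11)
{
	if (!pf_is_v11)
		return 0;	/* 1.0 PFs expect no parameters */
	return VF_OFFLOAD_L2 | VF_OFFLOAD_RSS_AQ | VF_OFFLOAD_RSS_REG |
	       VF_OFFLOAD_WB_ON_ITR | VF_OFFLOAD_VLAN |
	       VF_OFFLOAD_RX_POLLING;
}

int main(void)
{
	printf("caps=%#x\n", (unsigned)vf_caps(1));
	return 0;
}
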
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fea3b75a9a35..22fc3d49c4b9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -48,10 +48,6 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __func__ , ## args)))
/* dummy struct to make common code less painful */
struct i40e_vsi {
@@ -70,6 +66,7 @@ struct i40e_vsi {
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
+ u16 qs_handle;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -90,7 +87,7 @@ struct i40e_vsi {
#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
#define I40EVF_AQ_LEN 32
-#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */
+#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
@@ -101,6 +98,8 @@ struct i40e_vsi {
#define MAX_RX_QUEUES 8
#define MAX_TX_QUEUES MAX_RX_QUEUES
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
@@ -113,8 +112,11 @@ struct i40e_q_vector {
struct i40e_ring_container tx;
u32 ring_mask;
u8 num_ringpairs; /* total number of ring pairs in vector */
+#define ITR_COUNTDOWN_START 100
+ u8 itr_countdown; /* when 0 or 1 update ITR */
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
+ bool arm_wb_state;
cpumask_var_t affinity_mask;
};
@@ -207,33 +209,38 @@ struct i40evf_adapter {
struct msix_entry *msix_entries;
u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
-/* duplcates for common code */
+#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
+#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
+#define I40EVF_FLAG_RESET_PENDING BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED BIT(10)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
+#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
+/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
/* flags for admin queue service task */
u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9)
+#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
/* OS defined structs */
struct net_device *netdev;
@@ -249,8 +256,17 @@ struct i40evf_adapter {
bool netdev_registered;
bool link_up;
enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+ ((_a)->pf_version.minor == 1))
u16 msg_enable;
struct i40e_eth_stats current_stats;
struct i40e_vsi vsi;
@@ -264,6 +280,7 @@ extern const char i40evf_driver_version[];
int i40evf_up(struct i40evf_adapter *adapter);
void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
void i40evf_reset(struct i40evf_adapter *adapter);
void i40evf_set_ethtool_ops(struct net_device *netdev);
void i40evf_update_stats(struct i40evf_adapter *adapter);
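
The itr_countdown field added to struct i40e_q_vector defers adaptive ITR recalculation for the first ITR_COUNTDOWN_START passes after a vector is mapped, so startup bursts don't distort the rate estimate. A minimal model of the gate as i40e_update_enable_itr() applies it:

#include <stdio.h>

#define ITR_COUNTDOWN_START 100

struct q_vector { unsigned char itr_countdown; };

/* returns nonzero when a dynamic ITR update is allowed this poll */
static int may_update_itr(struct q_vector *q)
{
	int allowed = (q->itr_countdown == 0);

	if (q->itr_countdown)
		q->itr_countdown--;
	else
		q->itr_countdown = ITR_COUNTDOWN_START;
	return allowed;
}

int main(void)
{
	struct q_vector q = { .itr_countdown = ITR_COUNTDOWN_START };
	int i, updates = 0;

	for (i = 0; i < 300; i++)
		updates += may_update_itr(&q);
	/* the gate opens roughly every 101st poll */
	printf("updates in 300 polls: %d\n", updates);
	return 0;
}
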
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 2b53c870e7f1..4790437a50ac 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
@@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
break;
case TCP_V6_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
@@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
default:
return -EINVAL;
@@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
default:
return -EINVAL;
@@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case IPV6_FLOW:
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
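
All of the ethtool changes above operate on HENA, a 64-bit enable mask with one bit per packet-classifier type, programmed as two 32-bit registers. A userspace sketch of the same split and merge, with the register writes replaced by variables; the PCTYPE values are the ones from the enum earlier in this patch:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))
#define PCTYPE_NONF_IPV4_UDP 31
#define PCTYPE_FRAG_IPV4     36

int main(void)
{
	uint32_t hena_lo = 0, hena_hi = 0;
	uint64_t hena = ((uint64_t)hena_hi << 32) | hena_lo;

	/* enable 4-tuple hashing for UDPv4, as the UDP_V4_FLOW case does */
	hena |= BIT_ULL(PCTYPE_NONF_IPV4_UDP) | BIT_ULL(PCTYPE_FRAG_IPV4);

	hena_lo = (uint32_t)hena;		/* would go to I40E_VFQF_HENA(0) */
	hena_hi = (uint32_t)(hena >> 32);	/* would go to I40E_VFQF_HENA(1) */
	printf("lo=%#x hi=%#x\n", (unsigned)hena_lo, (unsigned)hena_hi);
	return 0;
}
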
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4ab4ebba07a1..d962164dfb0f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,10 +34,10 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.2.25"
+#define DRV_VERSION "1.3.33"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
- "Copyright (c) 2013 - 2014 Intel Corporation.";
+ "Copyright (c) 2013 - 2015 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
*
@@ -49,6 +49,7 @@ static const char i40evf_copyright[] =
*/
static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
/* required last entry */
{0, }
};
@@ -203,7 +204,7 @@ static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
- wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
/* read flush */
rd32(hw, I40E_VFGEN_RSTAT);
@@ -240,11 +241,11 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & (1 << (i - 1))) {
+ if (mask & BIT(i - 1)) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
}
}
}
@@ -262,17 +263,17 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
if (mask & 1) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
- dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
}
for (i = 1; i < adapter->num_msix_vectors; i++) {
- if (mask & (1 << i)) {
+ if (mask & BIT(i)) {
dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
- dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
}
}
@@ -281,6 +282,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
/**
* i40evf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
+ * @flush: boolean value whether to run rd32()
**/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
@@ -304,15 +306,14 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_hw *hw = &adapter->hw;
u32 val;
- u32 ena_mask;
/* handle non-queue interrupts */
- val = rd32(hw, I40E_VFINT_ICR01);
- ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+ rd32(hw, I40E_VFINT_ICR01);
+ rd32(hw, I40E_VFINT_ICR0_ENA1);
- val = rd32(hw, I40E_VFINT_DYN_CTL01);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ val = rd32(hw, I40E_VFINT_DYN_CTL01) |
+ I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, val);
/* schedule work on the private workqueue */
@@ -333,7 +334,7 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -356,6 +357,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
}
/**
@@ -376,8 +378,9 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
- q_vector->ring_mask |= (1 << t_idx);
+ q_vector->ring_mask |= BIT(t_idx);
}
/**
@@ -406,7 +409,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
/* The ideal configuration...
* We have enough vectors to map one per queue.
*/
- if (q_vectors == (rxr_remaining * 2)) {
+ if (q_vectors >= (rxr_remaining * 2)) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
@@ -443,6 +446,29 @@ out:
return err;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40evf_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts. It's not called while the normal interrupt routine is executing.
+ **/
+static void i40evf_netpoll(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &adapter->vsi.state))
+ return;
+
+ for (i = 0; i < q_vectors; i++)
+ i40evf_msix_clean_rings(0, adapter->q_vector[i]);
+}
+
+#endif
/**
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
@@ -488,8 +514,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
q_vector);
if (err) {
dev_info(&adapter->pdev->dev,
- "%s: request_irq failed, error: %d\n",
- __func__, err);
+ "Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* assign the mask for this irq */
@@ -730,6 +755,8 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ if (!VLAN_ALLOWED(adapter))
+ return -EIO;
if (i40evf_add_vlan(adapter, vid) == NULL)
return -ENOMEM;
return 0;
@@ -745,8 +772,11 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- i40evf_del_vlan(adapter, vid);
- return 0;
+ if (VLAN_ALLOWED(adapter)) {
+ i40evf_del_vlan(adapter, vid);
+ return 0;
+ }
+ return -EIO;
}
/**
@@ -836,6 +866,15 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
+ return -EPERM;
+
+ f = i40evf_find_filter(adapter, hw->mac.addr);
+ if (f) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ }
+
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -855,6 +894,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct i40evf_mac_filter *f, *ftmp;
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
+ struct netdev_hw_addr *ha;
int count = 50;
/* add addr if not already in the filter list */
@@ -876,27 +916,27 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
}
/* remove filter if not in netdev list */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
- bool found = false;
-
- if (is_multicast_ether_addr(f->macaddr)) {
- netdev_for_each_mc_addr(mca, netdev) {
- if (ether_addr_equal(mca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- } else {
- netdev_for_each_uc_addr(uca, netdev) {
- if (ether_addr_equal(uca->addr, f->macaddr)) {
- found = true;
- break;
- }
- }
- }
- if (found) {
- f->remove = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- }
+ netdev_for_each_mc_addr(mca, netdev)
+ if (ether_addr_equal(mca->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ netdev_for_each_uc_addr(uca, netdev)
+ if (ether_addr_equal(uca->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ for_each_dev_addr(netdev, ha)
+ if (ether_addr_equal(ha->addr, f->macaddr))
+ goto bottom_of_search_loop;
+
+ if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+ goto bottom_of_search_loop;
+
+ /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+bottom_of_search_loop:
+ continue;
}
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
@@ -1108,6 +1148,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
+ if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+ tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
adapter->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
@@ -1162,7 +1204,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
- i40evf_acquire_msix_vectors(adapter, v_budget);
+ err = i40evf_acquire_msix_vectors(adapter, v_budget);
out:
adapter->netdev->real_num_tx_queues = pairs;
@@ -1170,6 +1212,113 @@ out:
}
/**
+ * i40evf_configure_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+ struct i40e_aqc_get_set_rss_key_data rss_key;
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_hw *hw = &adapter->hw;
+ int ret = 0, i;
+ u8 *rss_lut;
+
+ if (!vsi->id)
+ return;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ memset(&rss_key, 0, sizeof(rss_key));
+ memcpy(&rss_key, seed, sizeof(rss_key));
+
+ rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL);
+ if (!rss_lut)
+ return;
+
+ /* Populate the LUT with max no. of PF queues in round robin fashion */
+ for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++)
+ rss_lut[i] = i % adapter->num_active_queues;
+
+ ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ return;
+ }
+
+ ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut,
+ (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4);
+ if (ret)
+ dev_err(&adapter->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+}
+
+/**
+ * i40evf_configure_rss_reg - Prepare for RSS if used
+ * @adapter: board private structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
+ const u8 *seed)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 *seed_dw = (u32 *)seed;
+ u32 cqueue = 0;
+ u32 lut = 0;
+ int i, j;
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
+
+ /* Populate the LUT with max no. of PF queues in round robin fashion */
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ lut = 0;
+ for (j = 0; j < 4; j++) {
+ if (cqueue == adapter->num_active_queues)
+ cqueue = 0;
+ lut |= ((cqueue) << (8 * j));
+ cqueue++;
+ }
+ wr32(hw, I40E_VFQF_HLUT(i), lut);
+ }
+ i40e_flush(hw);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u8 seed[I40EVF_HKEY_ARRAY_SIZE];
+ u64 hena;
+
+ netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+ if (RSS_AQ(adapter))
+ i40evf_configure_rss_aq(&adapter->vsi, seed);
+ else
+ i40evf_configure_rss_reg(adapter, seed);
+}
+
+/**
* i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
@@ -1311,16 +1460,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
struct i40evf_adapter,
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
- uint32_t rstat_val;
+ u32 reg_val;
if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat_val == I40E_VFR_VFACTIVE) ||
- (rstat_val == I40E_VFR_COMPLETED)) {
+ reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if ((reg_val == I40E_VFR_VFACTIVE) ||
+ (reg_val == I40E_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP;
@@ -1345,11 +1494,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
/* check for reset */
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
- (rstat_val != I40E_VFR_VFACTIVE) &&
- (rstat_val != I40E_VFR_COMPLETED)) {
+ reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
adapter->state = __I40EVF_RESETTING;
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
@@ -1369,6 +1515,10 @@ static void i40evf_watchdog_task(struct work_struct *work)
}
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+ i40evf_send_vf_config_msg(adapter);
+ goto watchdog_done;
+ }
if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
i40evf_disable_queues(adapter);
@@ -1410,6 +1560,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+ /* This message goes straight to the firmware, not the
+ * PF, so we don't have to set current_op as we will
+ * not get a response through the ARQ.
+ */
+ i40evf_configure_rss(adapter);
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ goto watchdog_done;
+ }
+
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
watchdog_done:
@@ -1432,45 +1592,6 @@ restart_watchdog:
schedule_work(&adapter->adminq_task);
}
-/**
- * i40evf_configure_rss - Prepare for RSS
- * @adapter: board private structure
- **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
-{
- u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
- struct i40e_hw *hw = &adapter->hw;
- u32 cqueue = 0;
- u32 lut = 0;
- int i, j;
- u64 hena;
-
- /* Hash type is configured by the PF - we just supply the key */
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
-
- /* Fill out hash function seed */
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
-
- /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- hena = I40E_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
- lut = 0;
- for (j = 0; j < 4; j++) {
- if (cqueue == adapter->num_active_queues)
- cqueue = 0;
- lut |= ((cqueue) << (8 * j));
- cqueue++;
- }
- wr32(hw, I40E_VFQF_HLUT(i), lut);
- }
- i40e_flush(hw);
-}
-
#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
/**
@@ -1488,8 +1609,9 @@ static void i40evf_reset_task(struct work_struct *work)
reset_task);
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_vlan_filter *vlf;
struct i40evf_mac_filter *f;
- uint32_t rstat_val;
+ u32 reg_val;
int i = 0, err;
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
@@ -1510,12 +1632,11 @@ static void i40evf_reset_task(struct work_struct *work)
/* poll until we see the reset actually happen */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat_val != I40E_VFR_VFACTIVE) &&
- (rstat_val != I40E_VFR_COMPLETED))
+ reg_val = rd32(hw, I40E_VF_ARQLEN1) &
+ I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ if (!reg_val)
break;
- usleep_range(500, 1000);
+ usleep_range(5000, 10000);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
dev_info(&adapter->pdev->dev, "Never saw reset\n");
@@ -1524,21 +1645,21 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
- rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_VFACTIVE)
+ reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (reg_val == I40E_VFR_VFACTIVE)
break;
msleep(I40EVF_RESET_WAIT_MS);
}
/* extra wait to make sure minimum wait is met */
msleep(I40EVF_RESET_WAIT_MS);
if (i == I40EVF_RESET_WAIT_COUNT) {
- struct i40evf_mac_filter *f, *ftmp;
+ struct i40evf_mac_filter *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;
/* reset never finished */
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
- rstat_val);
+ reg_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) {
@@ -1604,17 +1725,18 @@ continue_reset:
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
err);
- i40evf_map_queues(adapter);
+ adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
}
/* re-add all VLAN filters */
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- f->add = true;
+ list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+ vlf->add = true;
}
- adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
@@ -1693,34 +1815,34 @@ static void i40evf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
oldval = val;
- if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
}
- if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
}
- if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
- val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
}
if (oldval != val)
wr32(hw, hw->aq.arq.len, val);
val = rd32(hw, hw->aq.asq.len);
oldval = val;
- if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
}
- if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
}
- if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
- val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
}
if (oldval != val)
wr32(hw, hw->aq.asq.len, val);
@@ -1767,8 +1889,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
if (!err)
continue;
dev_err(&adapter->pdev->dev,
- "%s: Allocation for Tx Queue %u failed\n",
- __func__, i);
+ "Allocation for Tx Queue %u failed\n", i);
break;
}
@@ -1795,8 +1916,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
if (!err)
continue;
dev_err(&adapter->pdev->dev,
- "%s: Allocation for Rx Queue %u failed\n",
- __func__, i);
+ "Allocation for Rx Queue %u failed\n", i);
break;
}
return err;
@@ -1856,6 +1976,7 @@ static int i40evf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ i40evf_add_filter(adapter, adapter->hw.mac.addr);
i40evf_configure(adapter);
err = i40evf_up_complete(adapter);
@@ -1954,6 +2075,9 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i40evf_netpoll,
+#endif
};
/**
@@ -1979,6 +2103,66 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
}
/**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+ return -ENODEV;
+ }
+
+ if (adapter->vf_res->vf_offload_flags
+ & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ adapter->vsi.netdev = adapter->netdev;
+ adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
+ return 0;
+}
+
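One subtlety in i40evf_process_config() above: vlan_features describes offloads usable for packets inside a VLAN, so the CTAG bits are stripped from it even as they are enabled in features. The deleted init_task code further down reaches the same state in the opposite order, by snapshotting vlan_features before adding the CTAG bits. The invariant both orderings preserve (illustrative only, not code from the patch):

	netdev_features_t ctag = NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	WARN_ON(netdev->vlan_features & ctag);		/* none inside a VLAN */
	WARN_ON((netdev->features & ctag) != ctag);	/* all on the netdev */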
+/**
* i40evf_init_task - worker thread to perform delayed initialization
* @work: pointer to work_struct containing our data
*
@@ -1996,10 +2180,9 @@ static void i40evf_init_task(struct work_struct *work)
struct i40evf_adapter,
init_task.work);
struct net_device *netdev = adapter->netdev;
- struct i40evf_mac_filter *f;
struct i40e_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- int i, err, bufsz;
+ int err, bufsz;
switch (adapter->state) {
case __I40EVF_STARTUP:
@@ -2050,6 +2233,12 @@ static void i40evf_init_task(struct work_struct *work)
if (err) {
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
err = i40evf_send_api_ver(adapter);
+ else
+ dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+ adapter->pf_version.major,
+ adapter->pf_version.minor,
+ I40E_VIRTCHNL_VERSION_MAJOR,
+ I40E_VIRTCHNL_VERSION_MINOR);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2085,60 +2274,26 @@ static void i40evf_init_task(struct work_struct *work)
default:
goto err_alloc;
}
- /* got VF config message back from PF, now we can parse it */
- for (i = 0; i < adapter->vf_res->num_vsis; i++) {
- if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- adapter->vsi_res = &adapter->vf_res->vsi_res[i];
- }
- if (!adapter->vsi_res) {
- dev_err(&pdev->dev, "No LAN VSI found\n");
+ if (i40evf_process_config(adapter))
goto err_alloc;
- }
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netdev->features |= NETIF_F_HIGHDMA |
- NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_GRO;
-
- if (adapter->vf_res->vf_offload_flags
- & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features;
- netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- }
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features &= ~NETIF_F_RXCSUM;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);
- random_ether_addr(adapter->hw.mac.addr);
+ eth_hw_addr_random(netdev);
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ } else {
+ adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
- ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
-
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- goto err_sw_init;
-
- ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
- f->add = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
- list_add(&f->list, &adapter->mac_filter_list);
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &i40evf_watchdog_timer;
@@ -2154,24 +2309,17 @@ static void i40evf_init_task(struct work_struct *work)
if (err)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
- i40evf_configure_rss(adapter);
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
+ if (!RSS_AQ(adapter))
+ i40evf_configure_rss(adapter);
err = i40evf_request_misc_irq(adapter);
if (err)
goto err_sw_init;
netif_carrier_off(netdev);
- adapter->vsi.id = adapter->vsi_res->vsi_id;
- adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
- adapter->vsi.back = adapter;
- adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- adapter->vsi.netdev = adapter->netdev;
-
if (!adapter->netdev_registered) {
err = register_netdev(netdev);
if (err)
@@ -2190,10 +2338,16 @@ static void i40evf_init_task(struct work_struct *work)
adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
+
+ if (RSS_AQ(adapter)) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ } else {
+ i40evf_configure_rss(adapter);
+ }
return;
restart:
- schedule_delayed_work(&adapter->init_task,
- msecs_to_jiffies(50));
+ schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
return;
err_register:
@@ -2206,11 +2360,14 @@ err_alloc:
err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
- dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
+ dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- return; /* do not reschedule */
+ i40evf_shutdown_adminq(hw);
+ adapter->state = __I40EVF_STARTUP;
+ schedule_delayed_work(&adapter->init_task, HZ * 5);
+ return;
}
- schedule_delayed_work(&adapter->init_task, HZ * 3);
+ schedule_delayed_work(&adapter->init_task, HZ);
}
/**
@@ -2299,7 +2456,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &adapter->hw;
hw->back = adapter;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
adapter->state = __I40EVF_STARTUP;
/* Call save state here because it relies on the adapter struct. */
@@ -2326,7 +2483,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
- schedule_delayed_work(&adapter->init_task, 10);
+ schedule_delayed_work(&adapter->init_task,
+ msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
return 0;
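The probe path now staggers the first init_task run by PCI function number instead of a fixed 10 ticks: the low three bits of devfn are the function number, so virtual functions sharing a device start 5 ms apart rather than flooding the PF's admin queue at once. The same delay spelled with the standard macro (an equivalent form, not what the patch uses):

	#include <linux/pci.h>	/* PCI_FUNC(devfn) == (devfn & 0x07) */

	schedule_delayed_work(&adapter->init_task,
			      msecs_to_jiffies(5 * PCI_FUNC(pdev->devfn)));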
@@ -2402,6 +2560,7 @@ static int i40evf_resume(struct pci_dev *pdev)
rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
if (err) {
+ rtnl_unlock();
dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
return err;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 61e090558f31..32e620e1eb5c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
- dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
- op, err, hw->aq.asq_last_status);
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+ op, i40evf_stat_str(hw, err),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
- if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
- (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ adapter->pf_version = *pf_vvi;
+
+ if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
err = -EIO;
out_alloc:
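The relaxed check above accepts any PF whose virtchnl version is not newer than the VF's own, instead of demanding an exact match, and stashes the PF's version in adapter->pf_version so PF_IS_V11() can pick the right request format later. The acceptance rule, isolated into a hypothetical helper:

	static bool pf_version_ok(const struct i40e_virtchnl_version_info *pf)
	{
		if (pf->major > I40E_VIRTCHNL_VERSION_MAJOR)
			return false;	/* PF speaks a newer major */
		if (pf->major == I40E_VIRTCHNL_VERSION_MAJOR &&
		    pf->minor > I40E_VIRTCHNL_VERSION_MINOR)
			return false;	/* same major, newer minor */
		return true;		/* equal or older is fine */
	}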
@@ -145,8 +149,25 @@ out:
**/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
- return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- NULL, 0);
+ u32 caps;
+
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+ caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+ I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+ if (PF_IS_V11(adapter))
+ return i40evf_send_pf_msg(adapter,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ (u8 *)&caps, sizeof(caps));
+ else
+ return i40evf_send_pf_msg(adapter,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
}
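With a v1.1 PF the VF now advertises its capabilities as the payload of GET_VF_RESOURCES; a v1.0 PF still receives the old zero-length request. The offload flags the PF grants back gate later behavior, for example (hypothetical consumer, mirroring the RSS_AQ() checks in the main module):

	if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
		/* the PF programs RSS for us over the admin queue */
		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
	else
		/* fall back to writing the VFQF_* registers directly */
		i40evf_configure_rss(adapter);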
/**
@@ -214,8 +235,8 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
@@ -268,13 +289,13 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+ vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -293,13 +314,13 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
- vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+ vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -321,8 +342,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -373,8 +394,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
+ adapter->current_op);
return;
}
list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -390,8 +411,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -433,8 +453,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
+ adapter->current_op);
return;
}
list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -450,8 +470,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
@@ -493,8 +512,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
+ adapter->current_op);
return;
}
@@ -511,8 +530,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -552,8 +570,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
+ adapter->current_op);
return;
}
@@ -570,8 +588,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
- __func__);
+ dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
@@ -609,8 +626,8 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
- __func__, adapter->current_op);
+ dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
+ adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -700,16 +717,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
default:
- dev_err(&adapter->pdev->dev,
- "%s: Unknown event %d from pf\n",
- __func__, vpe->event);
+ dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
+ vpe->event);
break;
}
return;
}
if (v_retval) {
- dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
- __func__, v_retval, v_opcode);
+ dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
+ v_retval, i40evf_stat_str(&adapter->hw, v_retval),
+ v_opcode);
}
switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +746,17 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->current_stats = *stats;
}
break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+ u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource);
+ memcpy(adapter->vf_res, msg, min(msglen, len));
+ i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+ /* restore current mac address */
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ i40evf_process_config(adapter);
+ }
+ break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
@@ -740,7 +768,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_free_all_rx_resources(adapter);
break;
case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
/* Don't display an error if we get these out of sequence.
* If the firmware needed to get kicked, we'll get these and
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b0182dd31346..7a73510e547c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* reset page to 0 */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
- if (ret_val)
- return ret_val;
if (data & E1000_M88E1112_STATUS_LINK)
port = E1000_MEDIA_PORT_OTHER;
@@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
if (port && (hw->dev_spec._82575.media_port != port)) {
hw->dev_spec._82575.media_port = port;
hw->dev_spec._82575.media_changed = true;
+ }
+
+ if (port == E1000_MEDIA_PORT_COPPER) {
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ igb_check_for_link_82575(hw);
} else {
- ret_val = igb_check_for_link_82575(hw);
+ igb_check_for_link_82575(hw);
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
}
return 0;
@@ -223,6 +231,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
@@ -235,7 +244,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
- /* Check if this PHY is confgured for media swap. */
+ /* Check if this PHY is configured for media swap. */
if (phy->id == M88E1112_E_PHY_ID) {
u16 data;
@@ -258,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->mac.ops.check_for_link =
igb_check_for_link_media_swap;
}
+ if (phy->id == M88E1512_E_PHY_ID) {
+ ret_val = igb_initialize_M88E1512_phy(hw);
+ if (ret_val)
+ goto out;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
@@ -889,6 +903,7 @@ out:
**/
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
+ struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
/* This isn't a true "hard" reset, but is the only reset
@@ -905,7 +920,11 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
goto out;
ret_val = igb_phy_sw_reset(hw);
+ if (ret_val)
+ goto out;
+ if (phy->id == M88E1512_E_PHY_ID)
+ ret_val = igb_initialize_M88E1512_phy(hw);
out:
return ret_val;
}
@@ -1579,6 +1598,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
break;
@@ -2621,7 +2641,8 @@ s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
u16 phy_data;
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1543_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
if (!hw->dev_spec._82575.eee_disable) {
@@ -2701,7 +2722,8 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
/* Check if EEE is supported on this device. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1543_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f8684aa285be..b1915043bc0c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -604,6 +604,10 @@
#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
#define E1000_M88E1112_PAGE_ADDR 0x16
#define E1000_M88E1112_STATUS 0x01
+#define E1000_M88E1512_CFG_REG_1 0x0010
+#define E1000_M88E1512_CFG_REG_2 0x0011
+#define E1000_M88E1512_CFG_REG_3 0x0007
+#define E1000_M88E1512_MODE 0x0014
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
@@ -861,6 +865,7 @@
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
#define M88E1543_E_PHY_ID 0x01410EA0
+#define M88E1512_E_PHY_ID 0x01410DD0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c1bb64d8366f..23ec28f43f6d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,5 +1,5 @@
/* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_m88_cable_length_table) / \
- sizeof(e1000_m88_cable_length_table[0]))
static const u16 e1000_igp_2_cable_length_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_igp_2_cable_length_table) / \
- sizeof(e1000_igp_2_cable_length_table[0]))
/**
* igb_check_reset_block - Check if PHY reset is blocked
@@ -1268,6 +1262,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
reset_dsp = false;
break;
@@ -1276,9 +1272,9 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
reset_dsp = false;
break;
}
- if (!reset_dsp)
+ if (!reset_dsp) {
hw_dbg("Link taking longer than expected.\n");
- else {
+ } else {
/* We didn't get link.
* Reset the DSP and cross our fingers.
*/
@@ -1303,6 +1299,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_m88 ||
hw->phy.id == I347AT4_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID ||
+ hw->phy.id == M88E1543_E_PHY_ID ||
+ hw->phy.id == M88E1512_E_PHY_ID ||
hw->phy.id == I210_I_PHY_ID)
goto out;
@@ -1700,7 +1698,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1743,6 +1741,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -1796,7 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+ if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1840,7 +1839,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
- u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+ u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
IGP02E1000_PHY_AGC_A,
IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1862,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
IGP02E1000_AGC_LENGTH_MASK;
/* Array index bound check. */
- if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+ if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
(cur_agc_index == 0)) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -2195,6 +2194,90 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
}
/**
+ * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1512 to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ /* wait for the PHY soft reset to settle */
+ usleep_range(1000, 2000);
+out:
+ return ret_val;
+}
+
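Every write above follows the Marvell page/register indirection: select a page through the page-address register (0x16, shared with the M88E1543 definitions), write one or two configuration registers, and return to page 0 before the soft reset commits the sequence. A hypothetical helper capturing one round of that idiom:

	static s32 m88_paged_write(struct e1000_hw *hw, u16 page, u32 reg,
				   u16 val)
	{
		struct e1000_phy_info *phy = &hw->phy;
		s32 ret_val;

		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, page);
		if (ret_val)
			return ret_val;
		ret_val = phy->ops.write_reg(hw, reg, val);
		if (ret_val)
			return ret_val;
		/* leave the PHY on page 0 */
		return phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
	}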
+/**
* igb_power_up_phy_copper - Restore copper link in case of PHY power down
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 7af4ffab0285..24d55edbb0e3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -61,6 +61,7 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
void igb_power_up_phy_copper(struct e1000_hw *hw);
void igb_power_down_phy_copper(struct e1000_hw *hw);
s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw);
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 6f0490d0e981..4af2870e49f8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -104,6 +104,8 @@
#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */
+#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */
#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c2bd4f98a837..1a2f1cc44b28 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -444,8 +444,8 @@ struct igb_adapter {
struct ptp_pin_desc sdp_config[IGB_N_SDP];
struct {
- struct timespec start;
- struct timespec period;
+ struct timespec64 start;
+ struct timespec64 period;
} perout[IGB_N_PEROUT];
char fw_version[32];
@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d5673eb90c54..2529bc625de4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -842,10 +842,6 @@ static void igb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = IGB_STATS_LEN;
- drvinfo->testinfo_len = IGB_TEST_LEN;
- drvinfo->regdump_len = igb_get_regs_len(netdev);
- drvinfo->eedump_len = igb_get_eeprom_len(netdev);
}
static void igb_get_ringparam(struct net_device *netdev,
@@ -2159,6 +2155,27 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_rx_coalesce ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval)
+ return -ENOTSUPP;
+
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -2396,10 +2413,6 @@ static int igb_get_ts_info(struct net_device *dev,
info->rx_filters |=
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
@@ -2991,6 +3004,7 @@ static int igb_set_channels(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
unsigned int count = ch->combined_count;
+ unsigned int max_combined = 0;
/* Verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
@@ -3001,11 +3015,13 @@ static int igb_set_channels(struct net_device *netdev,
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
- if (count > igb_max_channels(adapter))
+ max_combined = igb_max_channels(adapter);
+ if (count > max_combined)
return -EINVAL;
if (count != adapter->rss_queues) {
adapter->rss_queues = count;
+ igb_set_flag_queue_pairs(adapter, max_combined);
/* Hardware has to reinitialize queues and interrupts to
* match the new configuration.
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..ea7b09887245 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -57,8 +57,8 @@
#include "igb.h"
#define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -151,7 +151,7 @@ static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
-static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
@@ -179,6 +179,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+static int igb_disable_sriov(struct pci_dev *dev);
+static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif
#ifdef CONFIG_PM
@@ -1205,10 +1207,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
- if (!q_vector)
+ if (!q_vector) {
q_vector = kzalloc(size, GFP_KERNEL);
- else
+ } else if (size > ksize(q_vector)) {
+ kfree_rcu(q_vector, rcu);
+ q_vector = kzalloc(size, GFP_KERNEL);
+ } else {
memset(q_vector, 0, size);
+ }
if (!q_vector)
return -ENOMEM;
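The allocation change above exploits slab slack: ksize() reports how many bytes the allocator actually handed out, which can exceed the requested size, so a q_vector from an earlier configuration is reused in place whenever it is still big enough, and released with kfree_rcu() (readers may still be walking it) only when it is not. A toy illustration of the ksize() behavior this relies on (sizes are typical, not guaranteed):

	p = kzalloc(500, GFP_KERNEL);	/* slab hands back a 512-byte object */
	ksize(p);			/* -> 512, not 500 */
	/* a later need for 510 bytes fits the old object; 600 would not */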
@@ -2645,7 +2651,11 @@ err_eeprom:
if (hw->flash_address)
iounmap(hw->flash_address);
err_sw_init:
+ kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+ igb_disable_sriov(pdev);
+#endif
pci_iounmap(pdev, hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -2805,14 +2815,14 @@ static void igb_remove(struct pci_dev *pdev)
*/
igb_release_hw_control(adapter);
- unregister_netdev(netdev);
-
- igb_clear_interrupt_scheme(adapter);
-
#ifdef CONFIG_PCI_IOV
igb_disable_sriov(pdev);
#endif
+ unregister_netdev(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
pci_iounmap(pdev, hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
@@ -2847,7 +2857,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
return;
pci_sriov_set_totalvfs(pdev, 7);
- igb_pci_enable_sriov(pdev, max_vfs);
+ igb_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
@@ -2888,6 +2898,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+ igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
/* Determine if we need to pair queues. */
switch (hw->mac.type) {
case e1000_82575:
@@ -2968,6 +2986,11 @@ static int igb_sw_init(struct igb_adapter *adapter)
}
#endif /* CONFIG_PCI_IOV */
+ /* Assume MSI-X interrupts, will be checked during IRQ allocation */
+ adapter->flags |= IGB_FLAG_HAS_MSIX;
+
+ igb_probe_vfs(adapter);
+
igb_init_queue_configuration(adapter);
/* Setup and initialize a copy of the hw vlan table array */
@@ -2980,8 +3003,6 @@ static int igb_sw_init(struct igb_adapter *adapter)
return -ENOMEM;
}
- igb_probe_vfs(adapter);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
igb_irq_disable(adapter);
@@ -5371,7 +5392,7 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
- struct timespec ts;
+ struct timespec64 ts;
u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
if (tsicr & TSINTR_SYS_WRAP) {
@@ -5391,10 +5412,11 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
if (tsicr & TSINTR_TT0) {
spin_lock(&adapter->tmreg_lock);
- ts = timespec_add(adapter->perout[0].start,
- adapter->perout[0].period);
+ ts = timespec64_add(adapter->perout[0].start,
+ adapter->perout[0].period);
+ /* u32 conversion of tv_sec is safe until y2106 */
wr32(E1000_TRGTTIML0, ts.tv_nsec);
- wr32(E1000_TRGTTIMH0, ts.tv_sec);
+ wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT0;
wr32(E1000_TSAUXC, tsauxc);
@@ -5405,10 +5427,10 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
if (tsicr & TSINTR_TT1) {
spin_lock(&adapter->tmreg_lock);
- ts = timespec_add(adapter->perout[1].start,
- adapter->perout[1].period);
+ ts = timespec64_add(adapter->perout[1].start,
+ adapter->perout[1].period);
wr32(E1000_TRGTTIML1, ts.tv_nsec);
- wr32(E1000_TRGTTIMH1, ts.tv_sec);
+ wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT1;
wr32(E1000_TSAUXC, tsauxc);
@@ -6342,6 +6364,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector,
napi);
bool clean_complete = true;
+ int work_done = 0;
#ifdef CONFIG_IGB_DCA
if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -6350,15 +6373,19 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (q_vector->tx.ring)
clean_complete = igb_clean_tx_irq(q_vector);
- if (q_vector->rx.ring)
- clean_complete &= igb_clean_rx_irq(q_vector, budget);
+ if (q_vector->rx.ring) {
+ int cleaned = igb_clean_rx_irq(q_vector, budget);
+
+ work_done += cleaned;
+ clean_complete &= (cleaned < budget);
+ }
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
/* If not enough Rx work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
igb_ring_irq_enable(q_vector);
return 0;
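Two related changes meet in igb_poll() above: igb_clean_rx_irq() now returns the number of packets cleaned instead of a boolean, and completion is reported through napi_complete_done(), which tells the core how much work was really done so mechanisms like busy polling can adapt. The completion rule, restated as a sketch:

	cleaned = igb_clean_rx_irq(q_vector, budget);
	if (cleaned == budget)
		return budget;			/* budget exhausted: poll again */
	napi_complete_done(napi, cleaned);	/* done; report real work */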
@@ -6566,7 +6593,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
static inline bool igb_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
@@ -6621,22 +6648,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
+ unsigned int pull_len;
- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
+ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+ if (likely(size <= IGB_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6678,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return false;
}
+ /* the linear header must contain at least ETH_HLEN bytes, or 60
+ * bytes when skb->len is below 60, so that skb_pad can pad the
+ * frame to the minimum Ethernet length.
+ */
+ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
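The reworked igb_add_rx_frag() above absorbs the separate igb_pull_tail() pass deleted further down: small frames are still copied wholesale into the linear area, but for larger ones eth_get_headlen() parses the protocol headers to size the pull, and only the payload remains as a page fragment. Roughly (a simplified sketch of the large-frame path):

	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);	/* headers only */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)(va + pull_len) & ~PAGE_MASK,
			size - pull_len, truesize);	/* payload stays paged */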
@@ -6791,62 +6834,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
}
/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
- *
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- /* retrieve timestamp from buffer */
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
- /* update pointers to remove timestamp header */
- skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
- frag->page_offset += IGB_TS_HDR_LEN;
- skb->data_len -= IGB_TS_HDR_LEN;
- skb->len -= IGB_TS_HDR_LEN;
-
- /* move va to start of packet data */
- va += IGB_TS_HDR_LEN;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* igb_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -6873,10 +6860,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
}
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- igb_pull_tail(rx_ring, rx_desc, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -6926,7 +6909,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
@@ -7000,7 +6983,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return total_packets < budget;
+ return total_packets;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7445,6 +7428,7 @@ static int igb_resume(struct device *dev)
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ rtnl_unlock();
return -ENOMEM;
}
@@ -7538,6 +7522,7 @@ static int igb_sriov_reinit(struct pci_dev *dev)
igb_init_queue_configuration(adapter);
if (igb_init_interrupt_scheme(adapter, true)) {
+ rtnl_unlock();
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c3a9392cbc19..c44df87c38de 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -143,7 +143,7 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter,
* sub-nanosecond resolution.
*/
wr32(E1000_SYSTIML, ts->tv_nsec);
- wr32(E1000_SYSTIMH, ts->tv_sec);
+ wr32(E1000_SYSTIMH, (u32)ts->tv_sec);
}
/**
@@ -405,7 +405,7 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
-static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
{
static const u32 aux0_sel_sdp[IGB_N_SDP] = {
AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
@@ -424,6 +424,14 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
};
+ static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
+ TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
+ TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
+ };
+ static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
+ TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+ TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+ };
static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
@@ -445,11 +453,17 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
tssdp &= ~AUX1_TS_SDP_EN;
tssdp &= ~ts_sdp_sel_clr[pin];
- if (chan == 1)
- tssdp |= ts_sdp_sel_tt1[pin];
- else
- tssdp |= ts_sdp_sel_tt0[pin];
-
+ if (freq) {
+ if (chan == 1)
+ tssdp |= ts_sdp_sel_fc1[pin];
+ else
+ tssdp |= ts_sdp_sel_fc0[pin];
+ } else {
+ if (chan == 1)
+ tssdp |= ts_sdp_sel_tt1[pin];
+ else
+ tssdp |= ts_sdp_sel_tt0[pin];
+ }
tssdp |= ts_sdp_en[pin];
wr32(E1000_TSSDP, tssdp);
@@ -463,10 +477,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
struct igb_adapter *igb =
container_of(ptp, struct igb_adapter, ptp_caps);
struct e1000_hw *hw = &igb->hw;
- u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+ u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
unsigned long flags;
- struct timespec ts;
- int pin = -1;
+ struct timespec64 ts;
+ int use_freq = 0, pin = -1;
s64 ns;
switch (rq->type) {
@@ -509,42 +523,60 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
}
ts.tv_sec = rq->perout.period.sec;
ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec_to_ns(&ts);
+ ns = timespec64_to_ns(&ts);
ns = ns >> 1;
- if (on && ns < 500000LL) {
- /* 2k interrupts per second is an awful lot. */
- return -EINVAL;
+ if (on && ns <= 70000000LL) {
+ if (ns < 8LL)
+ return -EINVAL;
+ use_freq = 1;
}
- ts = ns_to_timespec(ns);
+ ts = ns_to_timespec64(ns);
if (rq->perout.index == 1) {
- tsauxc_mask = TSAUXC_EN_TT1;
- tsim_mask = TSINTR_TT1;
+ if (use_freq) {
+ tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT1;
+ tsim_mask = TSINTR_TT1;
+ }
trgttiml = E1000_TRGTTIML1;
trgttimh = E1000_TRGTTIMH1;
+ freqout = E1000_FREQOUT1;
} else {
- tsauxc_mask = TSAUXC_EN_TT0;
- tsim_mask = TSINTR_TT0;
+ if (use_freq) {
+ tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
+ tsim_mask = 0;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT0;
+ tsim_mask = TSINTR_TT0;
+ }
trgttiml = E1000_TRGTTIML0;
trgttimh = E1000_TRGTTIMH0;
+ freqout = E1000_FREQOUT0;
}
spin_lock_irqsave(&igb->tmreg_lock, flags);
tsauxc = rd32(E1000_TSAUXC);
tsim = rd32(E1000_TSIM);
+ if (rq->perout.index == 1) {
+ tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+ tsim &= ~TSINTR_TT1;
+ } else {
+ tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+ tsim &= ~TSINTR_TT0;
+ }
if (on) {
int i = rq->perout.index;
-
- igb_pin_perout(igb, i, pin);
+ igb_pin_perout(igb, i, pin, use_freq);
igb->perout[i].start.tv_sec = rq->perout.start.sec;
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
igb->perout[i].period.tv_sec = ts.tv_sec;
igb->perout[i].period.tv_nsec = ts.tv_nsec;
wr32(trgttimh, rq->perout.start.sec);
wr32(trgttiml, rq->perout.start.nsec);
+ if (use_freq)
+ wr32(freqout, ns);
tsauxc |= tsauxc_mask;
tsim |= tsim_mask;
- } else {
- tsauxc &= ~tsauxc_mask;
- tsim &= ~tsim_mask;
}
wr32(E1000_TSAUXC, tsauxc);
wr32(E1000_TSIM, tsim);
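The rework above offloads fast periodic outputs to the i210's frequency-out function: rather than taking a target-time interrupt and re-arming TRGTTIM on every edge (the old code rejected anything needing more than about 2000 interrupts per second), half-periods of 70 ms or less are written once to FREQOUT and toggled entirely in hardware. The mode selection, restated (ns holds the half-period):

	if (ns < 8)			/* faster than the hardware can toggle */
		return -EINVAL;
	else if (ns <= 70000000LL)	/* short half-period: clock-out mode */
		use_freq = 1;		/* FREQOUT drives the pin, no IRQs */
	else				/* slow enough for TT0/TT1 interrupts */
		use_freq = 0;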
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index c6996feb1cb4..b74ce53d7b52 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -196,8 +196,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = igbvf_get_regs_len(netdev);
- drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
static void igbvf_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 95af14e139d7..297af801f051 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -319,6 +319,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_hdr_size,
DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
skb_put(skb, hlen);
}
@@ -1210,7 +1211,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
/* If not enough Rx work done, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->requested_itr & 3)
igbvf_set_itr(adapter);
@@ -2614,6 +2615,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_poll_controller = igbvf_netpoll,
#endif
.ndo_set_features = igbvf_set_features,
+ .ndo_features_check = passthru_features_check,
};
/**
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index b311e9e710d2..d2b29b490ae0 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -479,9 +479,6 @@ ixgb_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = IXGB_STATS_LEN;
- drvinfo->regdump_len = ixgb_get_regs_len(netdev);
- drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
}
static void
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ac3ac2a20386..1d2174526a4c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -152,9 +152,17 @@ struct vf_data_storage {
u16 vlan_count;
u8 spoofchk_enabled;
bool rss_query_enabled;
+ u8 trusted;
+ int xcast_mode;
unsigned int vf_api;
};
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
struct vf_macvlans {
struct list_head l;
int vf;
@@ -539,8 +547,7 @@ struct hwmon_buff {
#define IXGBE_MIN_RSC_ITR 24
#define IXGBE_100K_ITR 40
#define IXGBE_20K_ITR 200
-#define IXGBE_10K_ITR 400
-#define IXGBE_8K_ITR 500
+#define IXGBE_12K_ITR 336
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
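The retired 10K/8K defines and the new 12K one follow the same encoding: the EITR value appears to count 0.25 us units on these MACs, so the interrupt rate is roughly 4,000,000 / value per second, consistent with the surviving 40 -> 100K and 200 -> 20K pairs. A quick check of that assumption:

#include <stdio.h>

/* Assumes EITR counts 0.25 us units, as the 100K/20K defines suggest. */
int main(void)
{
	const unsigned int eitr[] = { 40, 200, 336, 400, 500 };

	for (unsigned int i = 0; i < sizeof(eitr) / sizeof(eitr[0]); i++)
		printf("EITR=%3u -> ~%6u ints/s\n", eitr[i],
		       4000000u / eitr[i]);
	/* 336 -> ~11904 ints/s, i.e. the new IXGBE_12K_ITR */
	return 0;
}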
@@ -595,6 +602,7 @@ struct ixgbe_mac_addr {
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
/* board specific private data structure */
struct ixgbe_adapter {
@@ -630,6 +638,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
+#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
@@ -644,6 +653,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
+#ifdef CONFIG_IXGBE_VXLAN
+#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
+#endif
/* Tx fast path data */
int num_tx_queues;
@@ -704,6 +716,7 @@ struct ixgbe_adapter {
u32 link_speed;
bool link_up;
+ unsigned long sfp_poll_time;
unsigned long link_check_timeout;
struct timer_list service_timer;
@@ -757,7 +770,9 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
+#ifdef CONFIG_IXGBE_VXLAN
u16 vxlan_port;
+#endif
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -967,4 +982,5 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
+void ixgbe_store_reta(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 6b87d9634614..a39afcf03e2c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,9 +44,8 @@
static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+static void
+ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
@@ -109,6 +108,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_hard_rate_select_speed;
} else {
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
@@ -504,16 +506,12 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
**/
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
- u32 autoc2_reg, fwsm;
+ u32 autoc2_reg;
u16 ee_ctrl_2 = 0;
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- /* Check to see if MNG FW could be enabled */
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
-
- if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
- !hw->wol_enabled &&
+ if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
@@ -650,176 +648,32 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
- * @hw: pointer to hardware structure
- * @speed: new link speed
- * @autoneg_wait_to_complete: true when waiting for completion is needed
+ * ixgbe_set_hard_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
*
- * Set the link speed in the AUTOC register and restarts link.
- **/
-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ * Set module link speed via RS0/RS1 rate select pins.
+ */
+static void
+ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
{
- s32 status = 0;
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
- u32 i = 0;
- bool link_up = false;
- bool autoneg = false;
-
- /* Mask off requested but non-supported speeds */
- status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
- &autoneg);
- if (status != 0)
- return status;
-
- speed &= link_speed;
-
- /*
- * Try each speed one by one, highest priority first. We do this in
- * software because 10gb fiber doesn't support speed autonegotiation.
- */
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- speedcnt++;
- highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status != 0)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- break;
- case ixgbe_media_type_fiber_qsfp:
- /* QSFP module automatically detects MAC link speed */
- break;
- default:
- hw_dbg(hw, "Unexpected media type.\n");
- break;
- }
- /* Allow module to change analog characteristics (1G->10G) */
- msleep(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg_wait_to_complete);
- if (status != 0)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- if (hw->mac.ops.flap_tx_laser)
- hw->mac.ops.flap_tx_laser(hw);
-
- /*
- * Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
- * attempted. 82599 uses the same timing for 10g SFI.
- */
- for (i = 0; i < 5; i++) {
- /* Wait for the link partner to also set speed */
- msleep(100);
-
- /* If we have link, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false);
- if (status != 0)
- return status;
-
- if (link_up)
- goto out;
- }
- }
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- speedcnt++;
- if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
- highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
-
- /* If we already have link at this speed, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status != 0)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
- goto out;
-
- /* Set the module link speed */
- switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber:
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
- break;
- case ixgbe_media_type_fiber_qsfp:
- /* QSFP module automatically detects MAC link speed */
- break;
- default:
- hw_dbg(hw, "Unexpected media type.\n");
- break;
- }
-
- /* Allow module to change analog characteristics (10G->1G) */
- msleep(40);
-
- status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_1GB_FULL,
- autoneg_wait_to_complete);
- if (status != 0)
- return status;
-
- /* Flap the tx laser if it has not already been done */
- if (hw->mac.ops.flap_tx_laser)
- hw->mac.ops.flap_tx_laser(hw);
-
- /* Wait for the link partner to also set speed */
- msleep(100);
-
- /* If we have link, just jump out */
- status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
- false);
- if (status != 0)
- return status;
-
- if (link_up)
- goto out;
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ break;
+ default:
+ hw_dbg(hw, "Invalid fixed module speed\n");
+ return;
}
- /*
- * We didn't get link. Configure back to the highest speed we tried,
- * (if there was more than one). We call ourselves back with just the
- * single highest speed that the user requested.
- */
- if (speedcnt > 1)
- status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed,
- autoneg_wait_to_complete);
-
-out:
- /* Set autoneg_advertised value based on input link speed */
- hw->phy.autoneg_advertised = 0;
-
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
- if (speed & IXGBE_LINK_SPEED_1GB_FULL)
- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
-
- return status;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
}
/**
@@ -1246,6 +1100,25 @@ mac_reset_top:
}
/**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ return 0;
+ udelay(10);
+ }
+
+ return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
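Assuming IXGBE_FDIRCMD_CMD_POLL is 10 (its value in ixgbe_type.h), the helper above bounds each wait at about 10 x 10 us = 100 us of busy-polling. The same bounded read-poll pattern in a self-contained form, with reg_read and delay_10us standing in for the MMIO read and udelay(10):

#include <stdint.h>
#include <stdio.h>

#define CMD_POLL 10	/* assumed IXGBE_FDIRCMD_CMD_POLL */
#define CMD_MASK 0x3	/* stand-in for IXGBE_FDIRCMD_CMD_MASK */

static int poll_cmd_complete(uint32_t (*reg_read)(void),
			     void (*delay_10us)(void), uint32_t *val)
{
	for (int i = 0; i < CMD_POLL; i++) {
		*val = reg_read();
		if (!(*val & CMD_MASK))
			return 0;	/* command field cleared: done */
		delay_10us();
	}
	return -1;			/* ~100 us elapsed, still busy */
}

static uint32_t fake_read(void) { return 0; }	/* pretend HW finished */
static void fake_delay(void) { }

int main(void)
{
	uint32_t v;

	printf("result=%d\n", poll_cmd_complete(fake_read, fake_delay, &v));
	return 0;
}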
+
+/**
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
@@ -1253,6 +1126,8 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ u32 fdircmd;
+ s32 err;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
@@ -1260,15 +1135,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* Before starting reinitialization process,
* FDIRCMD.CMD must be zero.
*/
- for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- IXGBE_FDIRCMD_CMD_MASK))
- break;
- udelay(10);
- }
- if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
- return IXGBE_ERR_FDIR_REINIT_FAILED;
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
@@ -1394,14 +1264,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
/*
* Continue setup of fdirctrl register bits:
* Turn perfect match filtering on
- * Report hash in RSS field of Rx wb descriptor
* Initialize the drop queue
* Move the flexible bytes to use the ethertype - shift 6 words
* Set the maximum length per hash bucket to 0xA filters
* Send interrupt when 64 (0x4 * 16) filters are left
*/
fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
- IXGBE_FDIRCTRL_REPORT_STATUS |
(IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
(0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
(0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
@@ -1509,20 +1377,28 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @input: unique input dword
* @common: compressed common input dword
* @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in the input must not be set when hardware
+ * tunneling support does not exist.
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
{
- u64 fdirhashcmd;
- u32 fdircmd;
+ u64 fdirhashcmd;
+ u8 flow_type;
+ bool tunnel;
+ u32 fdircmd;
/*
* Get the flow_type in order to program FDIRCMD properly
* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
*/
- switch (input.formatted.flow_type) {
+ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+ flow_type = input.formatted.flow_type &
+ (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+ switch (flow_type) {
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
case IXGBE_ATR_FLOW_TYPE_SCTPV4:
@@ -1538,8 +1414,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ if (tunnel)
+ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
/*
* The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
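The masking above packs a tunnel flag into the ATR flow type and strips it before FDIRCMD is programmed. It relies on IXGBE_ATR_L4TYPE_TUNNEL_MASK being a single power-of-two bit (0x10 in ixgbe_type.h, assumed here), so that MASK - 1 keeps exactly the low flow-type bits; a tunneled TCPv4 sample of 0x11 splits as follows:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ATR_L4TYPE_TUNNEL_MASK 0x10	/* assumed single-bit value */

int main(void)
{
	uint8_t raw = 0x11;		/* tunnel bit | TCPv4 flow type */
	bool tunnel = raw & ATR_L4TYPE_TUNNEL_MASK;
	uint8_t flow_type = raw & (ATR_L4TYPE_TUNNEL_MASK - 1);	/* 0x0f */

	printf("tunnel=%d flow_type=0x%02x\n", tunnel, flow_type);
	return 0;
}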
@@ -1746,6 +1624,16 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+ /* also use it for SCTP */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+ break;
+ default:
+ break;
+ }
+
/* store source and destination IP masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
~input_mask->formatted.src_ip[0]);
@@ -1760,6 +1648,7 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id, u8 queue)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ s32 err;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1808,6 +1697,11 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ hw_dbg(hw, "Flow Director command did not complete!\n");
+ return err;
+ }
return 0;
}
@@ -1817,9 +1711,8 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id)
{
u32 fdirhash;
- u32 fdircmd = 0;
- u32 retry_count;
- s32 err = 0;
+ u32 fdircmd;
+ s32 err;
/* configure FDIRHASH register */
fdirhash = input->formatted.bkt_hash;
@@ -1832,18 +1725,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
/* Query if filter is present */
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
- for (retry_count = 10; retry_count; retry_count--) {
- /* allow 10us for query to process */
- udelay(10);
- /* verify query completed successfully */
- fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
- if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
- break;
+ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ hw_dbg(hw, "Flow Director command did not complete!\n");
+ return err;
}
- if (!retry_count)
- err = IXGBE_ERR_FDIR_REINIT_FAILED;
-
/* if filter exists in hardware then remove it */
if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
@@ -1852,7 +1739,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
}
- return err;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4c1c26732b67..ce61b36b94f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -297,13 +297,13 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Setup flow control */
ret_val = ixgbe_setup_fc(hw);
- if (!ret_val)
- return 0;
+ if (ret_val)
+ return ret_val;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return ret_val;
+ return 0;
}
/**
@@ -2164,10 +2164,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
/*
* In order to prevent Tx hangs when the internal Tx
* switch is enabled we must set the high water mark
- * to the maximum FCRTH value. This allows the Tx
- * switch to function even under heavy Rx workloads.
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
*/
- fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
}
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
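This hunk (and the matching ixgbe_dcb_82599.c change further down) replaces the old 32-byte headroom under the Rx packet buffer size with a flat 24 KB (24576-byte) one. With a 512 KB buffer, taken here as an example value, the XOFF high-water mark moves from 0x7ffe0 down to 0x7a000:

#include <stdio.h>

int main(void)
{
	unsigned int rxpbsize = 512 * 1024;		/* example buffer size */
	unsigned int old_fcrth = rxpbsize - 32;		/* previous headroom */
	unsigned int new_fcrth = rxpbsize - 24576;	/* 24 KB headroom */

	printf("old=0x%x new=0x%x\n", old_fcrth, new_fcrth);
	return 0;
}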
@@ -2476,6 +2477,9 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ return 0;
+
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
@@ -3905,3 +3909,228 @@ void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
}
}
}
+
+/**
+ * ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ **/
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+ u32 fwsm;
+
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return false;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+ fwsm &= IXGBE_FWSM_MODE_MASK;
+ return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
+
+/**
+ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Set the link speed in the MAC and/or PHY register and restart the link.
+ */
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ s32 status = 0;
+ u32 speedcnt = 0;
+ u32 i = 0;
+ bool autoneg, link_up = false;
+
+ /* Mask off requested but non-supported speeds */
+ status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
+ if (status)
+ return status;
+
+ speed &= link_speed;
+
+ /* Try each speed one by one, highest priority first. We do this in
+ * software because 10Gb fiber doesn't support speed autonegotiation.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ speedcnt++;
+ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status)
+ return status;
+
+ if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_10GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (1G->10G) */
+ msleep(40);
+
+ status = hw->mac.ops.setup_mac_link(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg_wait_to_complete);
+ if (status)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+
+ /* Wait for the controller to acquire link. Per IEEE 802.3ap,
+ * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * attempted. 82599 uses the same timing for 10g SFI.
+ */
+ for (i = 0; i < 5; i++) {
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ speedcnt++;
+ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If we already have link at this speed, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status)
+ return status;
+
+ if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up)
+ goto out;
+
+ /* Set the module link speed */
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.set_rate_select_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type\n");
+ break;
+ }
+
+ /* Allow module to change analog characteristics (10G->1G) */
+ msleep(40);
+
+ status = hw->mac.ops.setup_mac_link(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg_wait_to_complete);
+ if (status)
+ return status;
+
+ /* Flap the Tx laser if it has not already been done */
+ if (hw->mac.ops.flap_tx_laser)
+ hw->mac.ops.flap_tx_laser(hw);
+
+ /* Wait for the link partner to also set speed */
+ msleep(100);
+
+ /* If we have link, just jump out */
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status)
+ return status;
+
+ if (link_up)
+ goto out;
+ }
+
+ /* We didn't get link. Configure back to the highest speed we tried,
+ * (if there was more than one). We call ourselves back with just the
+ * single highest speed that the user requested.
+ */
+ if (speedcnt > 1)
+ status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+ highest_link_speed,
+ autoneg_wait_to_complete);
+
+out:
+ /* Set autoneg_advertised value based on input link speed */
+ hw->phy.autoneg_advertised = 0;
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ return status;
+}
+
+/**
+ * ixgbe_set_soft_rate_select_speed - Set module link speed
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * Set module link speed via the soft rate select.
+ */
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one-bit mask, same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ hw_dbg(hw, "Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+ return;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+ return;
+ }
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index ec015fed8fa7..a0044e4a8b90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -113,6 +113,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
@@ -134,6 +135,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed);
#define IXGBE_FAILED_READ_REG 0xffffffffU
#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 3b932fe64ab6..23277ab153b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -259,7 +259,13 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
- reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+ /* In order to prevent Tx hangs when the internal Tx
+ * switch is enabled we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index ec7b2324b77b..d681273bd39d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -166,6 +166,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
/* set the supported link speeds */
if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
+ ecmd->supported |= SUPPORTED_2500baseX_Full;
if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
ecmd->supported |= SUPPORTED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
@@ -177,6 +179,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+ ecmd->advertising |= ADVERTISED_2500baseX_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
} else {
@@ -286,6 +290,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
case IXGBE_LINK_SPEED_10GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_10000);
break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ethtool_cmd_speed_set(ecmd, SPEED_2500);
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ethtool_cmd_speed_set(ecmd, SPEED_1000);
break;
@@ -936,9 +943,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = IXGBE_STATS_LEN;
- drvinfo->testinfo_len = IXGBE_TEST_LEN;
- drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
static void ixgbe_get_ringparam(struct net_device *netdev,
@@ -2279,7 +2283,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_itr_setting = ec->tx_coalesce_usecs;
if (adapter->tx_itr_setting == 1)
- tx_itr_param = IXGBE_10K_ITR;
+ tx_itr_param = IXGBE_12K_ITR;
else
tx_itr_param = adapter->tx_itr_setting;
@@ -2868,6 +2872,14 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type < ixgbe_mac_X550)
+ return 16;
+ else
+ return 64;
+}
+
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2907,6 +2919,44 @@ static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
+static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+ u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
+
+ if (hfunc)
+ return -EINVAL;
+
+ /* Fill out the redirection table */
+ if (indir) {
+ int max_queues = min_t(int, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
+
+ /* Allow at least 2 queues w/ SR-IOV. */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+ (max_queues < 2))
+ max_queues = 2;
+
+ /* Verify user input. */
+ for (i = 0; i < reta_entries; i++)
+ if (indir[i] >= max_queues)
+ return -EINVAL;
+
+ for (i = 0; i < reta_entries; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+ }
+
+ /* Fill out the rss hash key */
+ if (key)
+ memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
+
+ ixgbe_store_reta(adapter);
+
+ return 0;
+}
+
static int ixgbe_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -2938,14 +2988,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
@@ -3167,6 +3209,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_rxfh_indir_size = ixgbe_rss_indir_size,
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
+ .set_rxfh = ixgbe_set_rxfh,
.get_channels = ixgbe_get_channels,
.set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 68e1e757ecef..f3168bcc7d87 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -866,7 +866,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if (txr_count && !rxr_count) {
/* tx only vector */
if (adapter->tx_itr_setting == 1)
- q_vector->itr = IXGBE_10K_ITR;
+ q_vector->itr = IXGBE_12K_ITR;
else
q_vector->itr = adapter->tx_itr_setting;
} else {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..47395ff5d908 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -65,6 +65,9 @@
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
+#ifdef CONFIG_IXGBE_VXLAN
+#include <net/vxlan.h>
+#endif
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -76,10 +79,10 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.0.1-k"
+#define DRV_VERSION "4.2.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2014 Intel Corporation.";
+ "Copyright (c) 1999-2015 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
@@ -134,6 +137,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
/* required last entry */
{0, }
};
@@ -243,13 +247,20 @@ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
int expected_gts)
{
+ struct ixgbe_hw *hw = &adapter->hw;
int max_gts = 0;
enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
struct pci_dev *pdev;
- /* determine whether to use the the parent device
+ /* Some devices are not connected over PCIe and thus do not negotiate
+ * speed. These devices do not have valid bus info, and thus any report
+ * we generate may not be correct.
*/
+ if (hw->bus.type == ixgbe_bus_type_internal)
+ return;
+
+ /* determine whether to use the parent device */
if (ixgbe_pcie_from_parent(&adapter->hw))
pdev = adapter->pdev->bus->parent->self;
else
@@ -1234,9 +1245,12 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+ u32 txctrl = 0;
u16 reg_offset;
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
@@ -1268,9 +1282,11 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
int cpu)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
+ u32 rxctrl = 0;
u8 reg_idx = rx_ring->reg_idx;
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ rxctrl = dca3_get_tag(rx_ring->dev, cpu);
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
@@ -1287,6 +1303,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
* which will cause the DCA tag to be cleared.
*/
rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_DCA_EN |
IXGBE_DCA_RXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1316,11 +1333,13 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
int i;
- if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
- return;
-
/* always use CB2 mode, difference is masked in the CB driver */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_MODE_CB2);
+ else
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_DISABLE);
for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
@@ -1343,7 +1362,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
break;
if (dca_add_requester(dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
- ixgbe_setup_dca(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_MODE_CB2);
break;
}
/* Fall Through since DCA is disabled. */
@@ -1351,7 +1371,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
dca_remove_requester(dev);
adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_DISABLE);
}
break;
}
@@ -1360,14 +1381,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
}
#endif /* CONFIG_IXGBE_DCA */
+
+#define IXGBE_RSS_L4_TYPES_MASK \
+ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (ring->netdev->features & NETIF_F_RXHASH)
- skb_set_hash(skb,
- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
- PKT_HASH_TYPE_L3);
+ u16 rss_type;
+
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+ IXGBE_RXDADV_RSSTYPE_MASK;
+
+ if (!rss_type)
+ return;
+
+ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
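The rewritten hash helper above upgrades the reported hash from L3 to L4 only for the four TCP/UDP RSS types, and reports no hash at all when the descriptor carries no RSS type. A standalone version of the same bitmask test, with the RSS type codes assumed from ixgbe_type.h (IPv4+TCP=1, IPv4=2, IPv6+TCP=3, IPv4+UDP=7, IPv6+UDP=8):

#include <stdint.h>
#include <stdio.h>

/* RSS type codes assumed from ixgbe_type.h */
#define RSSTYPE_IPV4_TCP 1
#define RSSTYPE_IPV4     2
#define RSSTYPE_IPV6_TCP 3
#define RSSTYPE_IPV4_UDP 7
#define RSSTYPE_IPV6_UDP 8

#define RSS_L4_TYPES_MASK \
	((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP) | \
	 (1ul << RSSTYPE_IPV6_TCP) | (1ul << RSSTYPE_IPV6_UDP))

static const char *hash_level(uint16_t rss_type)
{
	if (!rss_type)
		return "none";	/* descriptor reported no hash */
	return (RSS_L4_TYPES_MASK & (1ul << rss_type)) ? "L4" : "L3";
}

int main(void)
{
	printf("IPv4+TCP  -> %s\n", hash_level(RSSTYPE_IPV4_TCP));	/* L4 */
	printf("IPv4 only -> %s\n", hash_level(RSSTYPE_IPV4));		/* L3 */
	return 0;
}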
#ifdef IXGBE_FCOE
@@ -1414,7 +1452,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
(hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
encap_pkt = true;
skb->encapsulation = 1;
- skb->ip_summed = CHECKSUM_NONE;
}
/* if IP and error */
@@ -1832,7 +1869,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
static inline bool ixgbe_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
@@ -2235,7 +2272,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
/* simple throttlerate management
* 0-10MB/s lowest (100000 ints/s)
* 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (8000 ints/s)
+ * 20-1249MB/s bulk (12000 ints/s)
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
@@ -2324,7 +2361,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
- new_itr = IXGBE_8K_ITR;
+ new_itr = IXGBE_12K_ITR;
break;
default:
break;
@@ -2469,17 +2506,27 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
- if (eicr & IXGBE_EICR_GPI_SDP2(hw)) {
+ if (!ixgbe_is_sfp(hw))
+ return;
+
+ /* Later MACs use a different SDP */
+ if (hw->mac.type >= ixgbe_mac_X540)
+ eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
+
+ if (eicr & eicr_mask) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw));
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ adapter->sfp_poll_time = 0;
ixgbe_service_event_schedule(adapter);
}
}
- if (eicr & IXGBE_EICR_GPI_SDP1(hw)) {
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+ (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
@@ -2596,6 +2643,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
@@ -2726,7 +2775,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring;
- int per_ring_budget;
+ int per_ring_budget, work_done = 0;
bool clean_complete = true;
#ifdef CONFIG_IXGBE_DCA
@@ -2747,9 +2796,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- ixgbe_for_each_ring(ring, q_vector->rx)
- clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
- per_ring_budget) < per_ring_budget);
+ ixgbe_for_each_ring(ring, q_vector->rx) {
+ int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ work_done += cleaned;
+ clean_complete &= (cleaned < per_ring_budget);
+ }
ixgbe_qv_unlock_napi(q_vector);
/* If all work not completed, return budget and keep polling */
@@ -2757,7 +2810,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -3287,7 +3340,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
*
* Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
*/
-static void ixgbe_store_reta(struct ixgbe_adapter *adapter)
+void ixgbe_store_reta(struct ixgbe_adapter *adapter)
{
u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3674,14 +3727,20 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
adapter->num_vfs);
- /* Ensure LLDP is set for Ethertype Antispoofing if we will be
+ /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
* calling set_ethertype_anti_spoofing for each VF in loop below
*/
- if (hw->mac.ops.set_ethertype_anti_spoofing)
+ if (hw->mac.ops.set_ethertype_anti_spoofing) {
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
- (IXGBE_ETQF_FILTER_EN | /* enable filter */
- IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
- IXGBE_ETH_P_LLDP)); /* LLDP eth type */
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETH_P_LLDP));
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ ETH_P_PAUSE));
+ }
/* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
@@ -3751,8 +3810,6 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
switch (hw->mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
case ixgbe_mac_82598EB:
/*
* For VMDq support of different descriptor types or
@@ -3766,6 +3823,11 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
*/
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
break;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ if (adapter->num_vfs)
+ rdrxctl |= IXGBE_RDRXCTL_PSP;
+ /* fall through for older HW */
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
@@ -4245,6 +4307,21 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
}
}
+static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
+#ifdef CONFIG_IXGBE_VXLAN
+ adapter->vxlan_port = 0;
+#endif
+ break;
+ default:
+ break;
+ }
+}
+
#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_configure_dcb - Configure DCB hardware
@@ -4726,6 +4803,12 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
break;
}
+#ifdef CONFIG_IXGBE_DCA
+ /* configure DCA */
+ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
+ ixgbe_setup_dca(adapter);
+#endif /* CONFIG_IXGBE_DCA */
+
#ifdef IXGBE_FCOE
/* configure FCoE L2 filters, redirection table, and Rx control */
ixgbe_configure_fcoe(adapter);
@@ -4752,6 +4835,7 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ adapter->sfp_poll_time = 0;
}
/**
@@ -4842,9 +4926,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
gpie |= IXGBE_SDP0_GPIEN_8259X;
break;
- case ixgbe_mac_X540:
- gpie |= IXGBE_EIMS_TS;
- break;
default:
break;
}
@@ -4854,9 +4935,15 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
gpie |= IXGBE_SDP1_GPIEN(hw);
- if (hw->mac.type == ixgbe_mac_82599EB) {
- gpie |= IXGBE_SDP1_GPIEN_8259X;
- gpie |= IXGBE_SDP2_GPIEN_8259X;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
+ break;
+ case ixgbe_mac_X550EM_x:
+ gpie |= IXGBE_SDP0_GPIEN_X540;
+ break;
+ default:
+ break;
}
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -5179,11 +5266,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
-
-#ifdef CONFIG_IXGBE_DCA
- /* since we reset the hardware DCA settings were cleared */
- ixgbe_setup_dca(adapter);
-#endif
}
/**
@@ -5229,7 +5311,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->atr_sample_rate = 20;
fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
@@ -5255,7 +5336,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
@@ -5286,6 +5366,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
+#ifdef CONFIG_IXGBE_VXLAN
+ adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
+#endif
break;
default:
break;
@@ -5737,10 +5820,11 @@ static int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
-#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
+ ixgbe_clear_vxlan_port(adapter);
+#ifdef CONFIG_IXGBE_VXLAN
vxlan_get_rx_port(netdev);
-
#endif
+
return 0;
err_set_queues:
@@ -5761,7 +5845,15 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
{
ixgbe_ptp_suspend(adapter);
- ixgbe_down(adapter);
+ if (adapter->hw.phy.ops.enter_lplu) {
+ adapter->hw.phy.reset_disable = true;
+ ixgbe_down(adapter);
+ adapter->hw.phy.ops.enter_lplu(&adapter->hw);
+ adapter->hw.phy.reset_disable = false;
+ } else {
+ ixgbe_down(adapter);
+ }
+
ixgbe_free_irq(adapter);
ixgbe_free_all_tx_resources(adapter);
@@ -6327,6 +6419,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
struct net_device *upper;
struct list_head *iter;
u32 link_speed = adapter->link_speed;
+ const char *speed_str;
bool flow_rx, flow_tx;
/* only continue if link was previously down */
@@ -6364,14 +6457,24 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
- e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
- (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
- "10 Gbps" :
- (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
- "1 Gbps" :
- (link_speed == IXGBE_LINK_SPEED_100_FULL ?
- "100 Mbps" :
- "unknown speed"))),
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ speed_str = "10 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ speed_str = "2.5 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ speed_str = "1 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ speed_str = "100 Mbps";
+ break;
+ default:
+ speed_str = "unknown speed";
+ break;
+ }
+ e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
((flow_rx && flow_tx) ? "RX/TX" :
(flow_rx ? "RX" :
(flow_tx ? "TX" : "None"))));
@@ -6628,10 +6731,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
+ if (adapter->sfp_poll_time &&
+ time_after(adapter->sfp_poll_time, jiffies))
+ return; /* not yet time to poll for SFP */
+
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
+ adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;
+
err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
goto sfp_out;
@@ -6800,6 +6909,12 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
+#ifdef CONFIG_IXGBE_VXLAN
+ if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+ adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ vxlan_get_rx_port(adapter->netdev);
+ }
+#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -6896,31 +7011,55 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & IXGBE_TX_FLAGS_CC))
return;
+ vlan_macip_lens = skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT;
} else {
u8 l4_hdr = 0;
- switch (first->protocol) {
- case htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
+ union {
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ u8 *raw;
+ } network_hdr;
+ union {
+ struct tcphdr *tcphdr;
+ u8 *raw;
+ } transport_hdr;
+
+ if (skb->encapsulation) {
+ network_hdr.raw = skb_inner_network_header(skb);
+ transport_hdr.raw = skb_inner_transport_header(skb);
+ vlan_macip_lens = skb_inner_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT;
+ } else {
+ network_hdr.raw = skb_network_header(skb);
+ transport_hdr.raw = skb_transport_header(skb);
+ vlan_macip_lens = skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT;
+ }
+
+ /* use the 4-bit version field to distinguish IPv4 from IPv6 */
+ switch (network_hdr.ipv4->version) {
+ case IPVERSION:
+ vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
+ l4_hdr = network_hdr.ipv4->protocol;
break;
- case htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
+ case 6:
+ vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
+ l4_hdr = network_hdr.ipv6->nexthdr;
break;
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
+ "partial checksum but version=%d\n",
+ network_hdr.ipv4->version);
}
- break;
}
switch (l4_hdr) {
case IPPROTO_TCP:
type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
+ mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
IXGBE_ADVTXD_L4LEN_SHIFT;
break;
case IPPROTO_SCTP:
@@ -6946,7 +7085,6 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
}
/* vlan_macip_lens: MACLEN, VLAN tag */
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
@@ -7201,6 +7339,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
struct ipv6hdr *ipv6;
} hdr;
struct tcphdr *th;
+ struct sk_buff *skb;
+#ifdef CONFIG_IXGBE_VXLAN
+ u8 encap = false;
+#endif /* CONFIG_IXGBE_VXLAN */
__be16 vlan_id;
/* if ring doesn't have a interrupt vector, cannot perform ATR */
@@ -7214,16 +7356,36 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
ring->atr_count++;
/* snag network header to get L4 type and address */
- hdr.network = skb_network_header(first->skb);
+ skb = first->skb;
+ hdr.network = skb_network_header(skb);
+ if (skb->encapsulation) {
+#ifdef CONFIG_IXGBE_VXLAN
+ struct ixgbe_adapter *adapter = q_vector->adapter;
- /* Currently only IPv4/IPv6 with TCP is supported */
- if ((first->protocol != htons(ETH_P_IPV6) ||
- hdr.ipv6->nexthdr != IPPROTO_TCP) &&
- (first->protocol != htons(ETH_P_IP) ||
- hdr.ipv4->protocol != IPPROTO_TCP))
+ if (!adapter->vxlan_port)
+ return;
+ if (first->protocol != htons(ETH_P_IP) ||
+ hdr.ipv4->version != IPVERSION ||
+ hdr.ipv4->protocol != IPPROTO_UDP) {
+ return;
+ }
+ if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
+ return;
+ encap = true;
+ hdr.network = skb_inner_network_header(skb);
+ th = inner_tcp_hdr(skb);
+#else
return;
-
- th = tcp_hdr(first->skb);
+#endif /* CONFIG_IXGBE_VXLAN */
+ } else {
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if ((first->protocol != htons(ETH_P_IPV6) ||
+ hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+ (first->protocol != htons(ETH_P_IP) ||
+ hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+ th = tcp_hdr(skb);
+ }
/* skip this packet since it is invalid or the socket is closing */
if (!th || th->fin)
@@ -7272,6 +7434,11 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.ipv6->daddr.s6_addr32[3];
}
+#ifdef CONFIG_IXGBE_VXLAN
+ if (encap)
+ input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
+#endif /* CONFIG_IXGBE_VXLAN */
+
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
input, common, ring->queue_index);
@@ -7737,9 +7904,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
bool pools;
/* Hardware supports up to 8 traffic classes */
- if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
- (hw->mac.type == ixgbe_mac_82598EB &&
- tc < MAX_TRAFFIC_CLASS))
+ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
+ return -EINVAL;
+
+ if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
return -EINVAL;
pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
@@ -7898,12 +8066,23 @@ static int ixgbe_set_features(struct net_device *netdev,
need_reset = true;
netdev->features = features;
+
+#ifdef CONFIG_IXGBE_VXLAN
+ if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
+ if (features & NETIF_F_RXCSUM)
+ adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+ else
+ ixgbe_clear_vxlan_port(adapter);
+ }
+#endif /* CONFIG_IXGBE_VXLAN */
+
if (need_reset)
ixgbe_do_reset(netdev);
return 0;
}
+#ifdef CONFIG_IXGBE_VXLAN
/**
* ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
* @dev: The port's netdev
@@ -7917,17 +8096,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
struct ixgbe_hw *hw = &adapter->hw;
u16 new_port = ntohs(port);
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
+
if (sa_family == AF_INET6)
return;
- if (adapter->vxlan_port == new_port) {
- netdev_info(dev, "Port %d already offloaded\n", new_port);
+ if (adapter->vxlan_port == new_port)
return;
- }
if (adapter->vxlan_port) {
netdev_info(dev,
- "Hit Max num of UDP ports, not adding port %d\n",
+ "Hit Max num of VXLAN ports, not adding port %d\n",
new_port);
return;
}
@@ -7946,9 +8126,11 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
__be16 port)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_hw *hw = &adapter->hw;
u16 new_port = ntohs(port);
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
+
if (sa_family == AF_INET6)
return;
@@ -7958,9 +8140,10 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
return;
}
- adapter->vxlan_port = 0;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
+ ixgbe_clear_vxlan_port(adapter);
+ adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
}
+#endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -8135,7 +8318,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
(adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
return ERR_PTR(-EBUSY);
- fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
+ fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
if (!fwd_adapter)
return ERR_PTR(-ENOMEM);
@@ -8191,6 +8374,21 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
+#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+static netdev_features_t
+ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!skb->encapsulation)
+ return features;
+
+ if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ IXGBE_MAX_TUNNEL_HDR_LEN))
+ return features & ~NETIF_F_ALL_CSUM;
+
+ return features;
+}
+
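ixgbe_features_check() above measures the span from the outer transport header to the inner MAC header and strips all checksum-offload feature bits when it exceeds 80 bytes, presumably beyond what the hardware can parse. For a plain VXLAN frame that span is only outer UDP (8) + VXLAN (8) = 16 bytes, so common encapsulations keep their offloads; a sketch of the bound check with example offsets:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TUNNEL_HDR_LEN 80	/* cap from the hunk above */

/* inner_mac_off: inner MAC header offset measured from the outer
 * transport header, example values only
 */
static bool csum_offload_kept(unsigned int inner_mac_off)
{
	return inner_mac_off <= MAX_TUNNEL_HDR_LEN;
}

int main(void)
{
	unsigned int vxlan = 8 + 8;	/* outer UDP + VXLAN header */

	printf("VXLAN span %u bytes -> offload %s\n", vxlan,
	       csum_offload_kept(vxlan) ? "kept" : "dropped");
	return 0;
}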
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -8209,6 +8407,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
+ .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
@@ -8236,8 +8435,11 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
+#ifdef CONFIG_IXGBE_VXLAN
.ndo_add_vxlan_port = ixgbe_add_vxlan_port,
.ndo_del_vxlan_port = ixgbe_del_vxlan_port,
+#endif /* CONFIG_IXGBE_VXLAN */
+ .ndo_features_check = ixgbe_features_check,
};
/**
@@ -8539,8 +8741,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.reset_if_overtemp = true;
err = hw->mac.ops.reset_hw(hw);
hw->phy.reset_if_overtemp = false;
- if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
- hw->mac.type == ixgbe_mac_82598EB) {
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
@@ -8597,17 +8798,24 @@ skip_sriov:
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+#ifdef CONFIG_IXGBE_VXLAN
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
break;
default:
break;
}
+#endif /* CONFIG_IXGBE_VXLAN */
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
@@ -8694,9 +8902,10 @@ skip_sriov:
hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
/* pick up the PCI bus settings for reporting later */
- hw->mac.ops.get_bus_info(hw);
if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
+ else
+ hw->mac.ops.get_bus_info(hw);
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
@@ -8844,7 +9053,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
dca_remove_requester(&pdev->dev);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
+ IXGBE_DCA_CTRL_DCA_DISABLE);
}
#endif
@@ -8855,17 +9065,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
/* remove the added san mac */
ixgbe_del_sanmac_netdev(netdev);
+#ifdef CONFIG_PCI_IOV
+ ixgbe_disable_sriov(adapter);
+#endif
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
-#ifdef CONFIG_PCI_IOV
- /*
- * Only disable SR-IOV on unload if the user specified the now
- * deprecated max_vfs module parameter.
- */
- if (max_vfs)
- ixgbe_disable_sriov(adapter);
-#endif
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index b1e4703ff2a5..8daa95f74548 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -102,6 +102,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c /* VF requests xcast mode change */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 526a20bf7488..fb8673d63806 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -100,16 +100,17 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
}
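
The checksum byte sent in these combined transactions is the complemented ones' complement sum of the address, register, and value bytes, built with the ixgbe_ones_comp_byte_add() referenced in the hunk header above. A self-contained sketch of that carry-wrapping addition (the function name is reused for clarity; this is not the driver source):

#include <stdint.h>
#include <stdio.h>

/* Ones' complement byte addition: the carry out of bit 7 wraps back
 * into the low byte. The driver complements the final sum (~csum)
 * before sending it as the checksum byte.
 */
static uint8_t ones_comp_byte_add(uint8_t a, uint8_t b)
{
        uint16_t sum = (uint16_t)a + b;

        return (uint8_t)((sum & 0xFF) + (sum >> 8));
}

int main(void)
{
        /* 0xF0 + 0x20 = 0x110 -> 0x10 plus wrapped carry = 0x11 */
        printf("0x%02X\n", ones_comp_byte_add(0xF0, 0x20));
        return 0;
}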
/**
- * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to read from
* @reg: I2C device register to read from
* @val: pointer to location to receive read value
+ * @lock: true to take and release the semaphore
*
* Returns an error code on error.
- **/
-s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
- u16 reg, u16 *val)
+ */
+static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 10;
@@ -124,7 +125,7 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
@@ -157,13 +158,15 @@ s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
if (ixgbe_clock_out_i2c_bit(hw, false))
goto fail;
ixgbe_i2c_stop(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
*val = (high_bits << 8) | low_bits;
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read combined error - Retry.\n");
@@ -175,17 +178,49 @@ fail:
}
/**
- * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_read_i2c_combined_generic_unlocked - Unlocked I2C read combined
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val)
+{
+ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to write to
* @reg: I2C device register to write to
* @val: value to write
+ * @lock: true to take and release the semaphore
*
* Returns an error code on error.
- **/
-s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
- u8 addr, u16 reg, u16 val)
+ */
+static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val, bool lock)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
int max_retry = 1;
int retry = 0;
u8 reg_high;
@@ -197,6 +232,8 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
csum = ~csum;
do {
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
/* Device Address and write indication */
if (ixgbe_out_i2c_byte_ack(hw, addr))
@@ -217,10 +254,14 @@ s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
if (ixgbe_out_i2c_byte_ack(hw, csum))
goto fail;
ixgbe_i2c_stop(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return 0;
fail:
ixgbe_i2c_bus_clear(hw);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte write combined error - Retry.\n");
@@ -232,6 +273,36 @@ fail:
}
/**
+ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
+}
+
+/**
+ * ixgbe_write_i2c_combined_generic_unlocked - Unlocked I2C write combined
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to write to
+ * @reg: I2C device register to write to
+ * @val: value to write
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
+ u8 addr, u16 reg, u16 val)
+{
+ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
+}
+
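
The conversion above follows one pattern used throughout this file: the original body moves into a static *_int() helper that takes a `lock` flag, and the exported locked and _unlocked entry points become one-line wrappers, so code that already holds the SWFW semaphore (such as the CS4227 helpers later in this series) can share the transfer logic. A reduced sketch of the pattern, with placeholder acquire()/release()/do_transfer() functions (assumptions, not ixgbe APIs):

#include <errno.h>
#include <stdbool.h>

static int acquire(void) { return 0; }          /* placeholder semaphore take */
static void release(void) { }                   /* placeholder semaphore give */
static int do_transfer(int reg, int *val)       /* placeholder bus operation */
{
        *val = reg;
        return 0;
}

/* Shared worker: takes/gives the semaphore only when asked to. */
static int op_int(int reg, int *val, bool lock)
{
        int err;

        if (lock && acquire())
                return -EBUSY;
        err = do_transfer(reg, val);
        if (lock)
                release();
        return err;
}

int op(int reg, int *val)               /* locked entry point */
{
        return op_int(reg, val, true);
}

int op_unlocked(int reg, int *val)      /* caller already holds the lock */
{
        return op_int(reg, val, false);
}

int main(void)
{
        int v;

        return op(0x12, &v);
}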
+/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
@@ -243,9 +314,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
u16 ext_ability = 0;
if (!hw->phy.phy_semaphore_mask) {
- hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
- IXGBE_STATUS_LAN_ID_1;
- if (hw->phy.lan_id)
+ if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
else
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
@@ -608,12 +677,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
s32 status;
- u32 gssr;
-
- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
- gssr = IXGBE_GSSR_PHY1_SM;
- else
- gssr = IXGBE_GSSR_PHY0_SM;
+ u32 gssr = hw->phy.phy_semaphore_mask;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -737,39 +801,61 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * ixgbe_get_copper_speeds_supported - Get copper link speed from phy
* @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
*
- * Determines the link capabilities by reading the AUTOC register.
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
*/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
- s32 status;
u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
+ s32 status;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
+ if (status)
+ return status;
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_100)
- *speed |= IXGBE_LINK_SPEED_100_FULL;
+ if (speed_ability & MDIO_SPEED_10G)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_1000)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_100)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case ixgbe_mac_X550EM_x:
+ hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ break;
}
- /* Internal PHY does not support 100 Mbps */
- if (hw->mac.type == ixgbe_mac_X550EM_x)
- *speed &= ~IXGBE_LINK_SPEED_100_FULL;
+ return 0;
+}
+
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = 0;
+
+ *autoneg = true;
+ if (!hw->phy.speeds_supported)
+ status = ixgbe_get_copper_speeds_supported(hw);
+ *speed = hw->phy.speeds_supported;
return status;
}
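
The rewritten capability query probes the PHY once and caches the result in hw->phy.speeds_supported, so later calls cost no MDIO traffic; a zero mask doubles as the "not probed yet" sentinel, which is safe because real hardware always reports at least one supported speed. The caching idiom in isolation (a sketch with invented values):

#include <stdint.h>

/* Cache an expensive probe in a zero-initialized field; zero means
 * "not probed yet".
 */
struct phy {
        uint32_t speeds_supported;
};

static uint32_t probe_speeds(void)              /* pretend MDIO read */
{
        return 0x1 | 0x2;                       /* say: 1G and 10G */
}

static uint32_t get_speeds(struct phy *phy)
{
        if (!phy->speeds_supported)
                phy->speeds_supported = probe_speeds();
        return phy->speeds_supported;
}

int main(void)
{
        struct phy phy = { 0 };

        (void)get_speeds(&phy);                 /* probes once */
        return get_speeds(&phy) == 0x3 ? 0 : 1; /* served from the cache */
}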
@@ -1085,6 +1171,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
return IXGBE_ERR_SFP_NOT_PRESENT;
}
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1092,9 +1181,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (status)
goto err_read_i2c_eeprom;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1144,7 +1230,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type = ixgbe_sfp_type_lr;
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ } else {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1645,26 +1731,46 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
- * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * ixgbe_is_sfp_probe - Returns true if SFP is being detected
+ * @hw: pointer to hardware structure
+ * @offset: eeprom offset to be read
+ * @addr: I2C address to be read
+ */
+static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
+{
+ if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
+ offset == IXGBE_SFF_IDENTIFIER &&
+ hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ return true;
+ return false;
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
* @data: value read
+ * @lock: true to take and release the semaphore
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
- **/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ */
+static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data, bool lock)
{
s32 status;
u32 max_retry = 10;
u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
bool nack = true;
+
+ if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
+ max_retry = IXGBE_SFP_DETECT_RETRIES;
+
*data = 0;
do {
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
ixgbe_i2c_start(hw);
@@ -1706,12 +1812,16 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- break;
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return 0;
fail:
ixgbe_i2c_bus_clear(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- msleep(100);
+ if (lock) {
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(100);
+ }
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");
@@ -1720,29 +1830,60 @@ fail:
} while (retry < max_retry);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
return status;
}
/**
- * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
* @data: value to write
+ * @lock: true to take and release the semaphore
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
- **/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ */
+static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data, bool lock)
{
s32 status;
u32 max_retry = 1;
u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return IXGBE_ERR_SWFW_SYNC;
do {
@@ -1773,7 +1914,9 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
goto fail;
ixgbe_i2c_stop(hw);
- break;
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return 0;
fail:
ixgbe_i2c_bus_clear(hw);
@@ -1784,21 +1927,57 @@ fail:
hw_dbg(hw, "I2C byte write error.\n");
} while (retry < max_retry);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ if (lock)
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return status;
}
/**
+ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, true);
+}
+
+/**
+ * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ */
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
+ data, false);
+}
+
+/**
* ixgbe_i2c_start - Sets I2C start condition
* @hw: pointer to hardware structure
*
* Sets I2C start condition (High -> Low on SDA while SCL is High)
+ * Sets bit-bang mode on X550 hardware.
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ i2cctl |= IXGBE_I2C_BB_EN(hw);
+
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1823,10 +2002,15 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ * Disables bit-bang mode and negates data output enable on X550
+ * hardware.
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
+ u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1839,6 +2023,13 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
/* bus free time between stop and start (4.7us)*/
udelay(IXGBE_I2C_T_BUF);
+
+ if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+ i2cctl &= ~bb_en_bit;
+ i2cctl |= data_oe_bit | clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
}
/**
@@ -1853,6 +2044,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
s32 i;
bool bit = false;
+ *data = 0;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
*data |= bit << i;
@@ -1886,6 +2078,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
/* Release SDA line (set high) */
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -1900,15 +2093,21 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
**/
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
s32 status = 0;
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 timeout = 10;
bool ack = true;
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
-
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
@@ -1946,7 +2145,14 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
@@ -2001,13 +2207,20 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
* @i2cctl: Current value of I2CCTL register
*
* Raises the I2C clock line '0'->'1'
+ * Negates the I2C clock output enable on X550 hardware.
**/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
u32 i = 0;
u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
u32 i2cctl_r = 0;
+ if (clk_oe_bit) {
+ *i2cctl |= clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+ }
+
for (i = 0; i < timeout; i++) {
*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
@@ -2027,11 +2240,13 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @i2cctl: Current value of I2CCTL register
*
* Lowers the I2C clock line '1'->'0'
+ * Asserts the I2C clock output enable on X550 hardware.
**/
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
*i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
+ *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -2047,13 +2262,17 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* @data: I2C data value (0 or 1) to set
*
* Sets the I2C data bit
+ * Asserts the I2C data output enable on X550 hardware.
**/
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+
if (data)
*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
else
*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
+ *i2cctl &= ~data_oe_bit;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
@@ -2061,6 +2280,14 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+ if (!data) /* Can't verify data in this case */
+ return 0;
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
/* Verify data was set correctly */
*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
@@ -2077,9 +2304,19 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
* @i2cctl: Current value of I2CCTL register
*
* Returns the I2C data bit value
+ * Negates the I2C data output enable on X550 hardware.
**/
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
+
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(IXGBE_I2C_T_FALL);
+ }
+
if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
return true;
return false;
@@ -2094,10 +2331,11 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
**/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 i2cctl;
u32 i;
ixgbe_i2c_start(hw);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ixgbe_set_i2c_data(hw, &i2cctl, 1);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index e45988c4dad5..5abd66c84d00 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -66,6 +66,9 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
@@ -78,9 +81,29 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
-#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
+#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4227_RESET_PENDING 0x1357
+#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
+#define IXGBE_CS4227_RETRIES 15
+#define IXGBE_CS4227_EFUSE_STATUS 0x0181
+#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to set speed */
+#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to set EDC */
+#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to set speed */
+#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */
+#define IXGBE_CS4227_EEPROM_STATUS 0x5001
+#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001
+#define IXGBE_CS4227_SPEED_1G 0x8000
+#define IXGBE_CS4227_SPEED_10G 0
#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008
+#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */
+#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */
+#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */
+#define IXGBE_PE 0xE0 /* Port expander addr */
+#define IXGBE_PE_OUTPUT 1 /* Output reg offset */
+#define IXGBE_PE_CONFIG 3 /* Config reg offset */
+#define IXGBE_PE_BIT1 (1 << 1)
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
@@ -109,6 +132,8 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
+#define IXGBE_SFP_DETECT_RETRIES 2
+
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
@@ -154,8 +179,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
+s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
+s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
@@ -164,6 +193,10 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val);
+s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 *val);
s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val);
+s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
+ u16 reg, u16 val);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1d17b5872dd1..fcd8b27a0ccb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -116,6 +116,12 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* we want to disable the querying by default.
*/
adapter->vfinfo[i].rss_query_enabled = 0;
+
+ /* Untrust all VFs */
+ adapter->vfinfo[i].trusted = false;
+
+ /* set the default xcast mode */
+ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
}
return 0;
@@ -1001,6 +1007,59 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ /* verify the PF is supporting the correct APIs */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
+ !adapter->vfinfo[vf].trusted)
+ xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+
+ if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
+ goto out;
+
+ switch (xcast_mode) {
+ case IXGBEVF_XCAST_MODE_NONE:
+ disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ enable = 0;
+ break;
+ case IXGBEVF_XCAST_MODE_MULTI:
+ disable = IXGBE_VMOLR_MPE;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
+ break;
+ case IXGBEVF_XCAST_MODE_ALLMULTI:
+ disable = 0;
+ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ adapter->vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
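
Each xcast mode above translates to a disable/enable mask pair applied in a single read-modify-write of the per-VF VMOLR register. The same table-driven update as a standalone sketch, with hypothetical bit values standing in for the IXGBE_VMOLR_* definitions:

#include <stdint.h>

#define BAM     0x1     /* accept broadcast (illustrative value) */
#define ROMPE   0x2     /* multicast table lookup (illustrative value) */
#define MPE     0x4     /* multicast promiscuous (illustrative value) */

enum xcast_mode { XCAST_NONE, XCAST_MULTI, XCAST_ALLMULTI };

/* One read-modify-write driven by a disable/enable mask pair. */
static uint32_t apply_xcast(uint32_t vmolr, enum xcast_mode mode)
{
        uint32_t disable, enable;

        switch (mode) {
        case XCAST_NONE:
                disable = BAM | ROMPE | MPE;
                enable = 0;
                break;
        case XCAST_MULTI:
                disable = MPE;
                enable = BAM | ROMPE;
                break;
        case XCAST_ALLMULTI:
        default:
                disable = 0;
                enable = BAM | ROMPE | MPE;
                break;
        }
        return (vmolr & ~disable) | enable;
}

int main(void)
{
        uint32_t vmolr = MPE;                   /* VF was in allmulti */

        vmolr = apply_xcast(vmolr, XCAST_MULTI);
        return vmolr == (BAM | ROMPE) ? 0 : 1;
}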
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1063,6 +1122,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_GET_RSS_KEY:
retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_UPDATE_XCAST_MODE:
+ retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
@@ -1124,6 +1186,17 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
}
+static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[vf].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, vf);
+}
+
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1416,6 +1489,28 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
return 0;
}
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ /* nothing to do */
+ if (adapter->vfinfo[vf].trusted == setting)
+ return 0;
+
+ adapter->vfinfo[vf].trusted = setting;
+
+ /* reset VF to reconfigure features */
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_ping_vf(adapter, vf);
+
+ e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
+
+ return 0;
+}
+
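
For reference, once .ndo_set_vf_trust is wired into the netdev ops table, the flag is normally toggled from user space with iproute2, e.g. `ip link set dev eth0 vf 0 trust on` (interface name illustrative); the xcast handler above then allows that VF to request promiscuous multicast, while untrusted VFs are silently capped at IXGBEVF_XCAST_MODE_MULTI.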
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
@@ -1430,5 +1525,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
+ ivi->trusted = adapter->vfinfo[vf].trusted;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2c197e6d1fe7..dad925706f4c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -49,6 +49,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting);
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index b6f424f3b1a8..995f03107eac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -402,6 +402,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FDIRSIP4M 0x0EE40
#define IXGBE_FDIRTCPM 0x0EE44
#define IXGBE_FDIRUDPM 0x0EE48
+#define IXGBE_FDIRSCTPM 0x0EE78
#define IXGBE_FDIRIP6M 0x0EE74
#define IXGBE_FDIRM 0x0EE70
@@ -848,6 +849,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
@@ -856,6 +858,24 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12 */
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14 */
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15 */
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800
+#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART 0x200
+#define IXGBE_MII_AUTONEG_COMPLETE 0x20
+#define IXGBE_MII_AUTONEG_LINK_UP 0x04
+#define IXGBE_MII_AUTONEG_REG 0x0
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1173,6 +1193,7 @@ struct ixgbe_thermal_sensor_data {
/* RDRXCTL Bit Masks */
#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad small packet */
#define IXGBE_RDRXCTL_MVMEN 0x00000020
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
@@ -1305,6 +1326,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
@@ -1312,7 +1334,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
-
+#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
@@ -1729,6 +1752,9 @@ enum {
* FCoE (0x8906): Filter 2
* 1588 (0x88f7): Filter 3
* FIP (0x8914): Filter 4
+ * LLDP (0x88CC): Filter 5
+ * LACP (0x8809): Filter 6
+ * FC (0x8808): Filter 7
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_FCOE 2
@@ -1736,6 +1762,7 @@ enum {
#define IXGBE_ETQF_FILTER_FIP 4
#define IXGBE_ETQF_FILTER_LLDP 5
#define IXGBE_ETQF_FILTER_LACP 6
+#define IXGBE_ETQF_FILTER_FC 7
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
@@ -1927,6 +1954,7 @@ enum {
#define IXGBE_GSSR_SW_MNG_SM 0x0400
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
+#define IXGBE_GSSR_NVM_PHY_MASK 0xF
/* FW Status register bitmask */
#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
@@ -2041,6 +2069,11 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define NVM_INIT_CTRL_3 0x38
+#define NVM_INIT_CTRL_3_LPLU 0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100
+
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2540,9 +2573,11 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
+#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT 23
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000
#define IXGBE_FDIR_DROP_QUEUE 127
@@ -2833,12 +2868,13 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
/* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK 0x7fff
-#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_UDP 0x1
-#define IXGBE_ATR_L4TYPE_TCP 0x2
-#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_MASK 0x3
+#define IXGBE_ATR_L4TYPE_UDP 0x1
+#define IXGBE_ATR_L4TYPE_TCP 0x2
+#define IXGBE_ATR_L4TYPE_SCTP 0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10
enum ixgbe_atr_flow_type {
IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
@@ -3035,9 +3071,8 @@ enum ixgbe_smart_speed {
/* PCI bus types */
enum ixgbe_bus_type {
ixgbe_bus_type_unknown = 0,
- ixgbe_bus_type_pci,
- ixgbe_bus_type_pcix,
ixgbe_bus_type_pci_express,
+ ixgbe_bus_type_internal,
ixgbe_bus_type_reserved
};
@@ -3227,9 +3262,11 @@ struct ixgbe_mac_operations {
void (*flap_tx_laser)(struct ixgbe_hw *);
void (*stop_link_on_d3)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
+ void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
/* Packet Buffer Manipulation */
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
@@ -3298,7 +3335,12 @@ struct ixgbe_phy_operations {
s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*enter_lplu)(struct ixgbe_hw *);
s32 (*handle_lasi)(struct ixgbe_hw *hw);
+ s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 *value);
+ s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ u16 value);
};
struct ixgbe_eeprom_info {
@@ -3308,6 +3350,7 @@ struct ixgbe_eeprom_info {
u16 word_size;
u16 address_bits;
u16 word_page_size;
+ u16 ctrl_word_3;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
@@ -3351,10 +3394,10 @@ struct ixgbe_phy_info {
bool sfp_setup_needed;
u32 revision;
enum ixgbe_media_type media_type;
- u8 lan_id;
u32 phy_semaphore_mask;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
+ ixgbe_link_speed speeds_supported;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -3460,16 +3503,21 @@ struct ixgbe_info {
#define IXGBE_ERR_PBA_SECTION -31
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
+#define IXGBE_FUSES0_300MHZ BIT(5)
+#define IXGBE_FUSES0_REV1 BIT(6)
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
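The KRM macro rewrite is behavior-preserving for ports 0 and 1 but also hardens the macros: any non-zero port now selects the upper register bank, instead of only an exact `P == 0` comparison selecting the lower one. A compile-and-run check of the pattern (illustrative, mirroring one of the defines above):

#include <assert.h>

/* Port-banked register offset: port 0 maps to the 0x4xxx bank, any
 * non-zero port to the 0x8xxx bank.
 */
#define KRM_LINK_CTRL_1(P)      ((P) ? 0x820C : 0x420C)

int main(void)
{
        assert(KRM_LINK_CTRL_1(0) == 0x420C);
        assert(KRM_LINK_CTRL_1(1) == 0x820C);
        return 0;
}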
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 032a5870abd1..c1d4584f6469 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -54,6 +54,11 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* set_phy_power was set by default to NULL */
+ if (!ixgbe_mng_present(hw))
+ phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
@@ -562,19 +567,25 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
**/
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 5;
- u32 hwmask = 0;
+ u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
+ u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
+ u32 fwmask = swmask << 5;
u32 timeout = 200;
+ u32 hwmask = 0;
+ u32 swfw_sync;
u32 i;
- if (swmask == IXGBE_GSSR_EEP_SM)
+ if (swmask & IXGBE_GSSR_EEP_SM)
hwmask = IXGBE_GSSR_FLASH_SM;
+ /* SW only mask does not have FW bit pair */
+ if (mask & IXGBE_GSSR_SW_MNG_SM)
+ swmask |= IXGBE_GSSR_SW_MNG_SM;
+
+ swmask |= swi2c_mask;
+ fwmask |= swi2c_mask << 2;
for (i = 0; i < timeout; i++) {
- /*
- * SW NVM semaphore bit is used for access to all
+ /* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
@@ -585,39 +596,56 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- break;
- } else {
- /*
- * Firmware currently using resource (fwmask),
- * hardware currently using resource (hwmask),
- * or other software thread currently using
- * resource (swmask)
- */
- ixgbe_release_swfw_sync_semaphore(hw);
- usleep_range(5000, 10000);
+ usleep_range(5000, 6000);
+ return 0;
}
+ /* Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ usleep_range(5000, 10000);
}
- /*
- * If the resource is not released by the FW/HW the SW can assume that
- * the FW/HW malfunctions. In that case the SW should sets the
- * SW bit(s) of the requested resource(s) while ignoring the
- * corresponding FW/HW bits in the SW_FW_SYNC register.
- */
- if (i >= timeout) {
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
- if (swfw_sync & (fwmask | hwmask)) {
- if (ixgbe_get_swfw_sync_semaphore(hw))
- return IXGBE_ERR_SWFW_SYNC;
+ /* Failed to get SW only semaphore */
+ if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+ hw_dbg(hw, "Failed to get SW only semaphore\n");
+ return IXGBE_ERR_SWFW_SYNC;
+ }
- swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
- ixgbe_release_swfw_sync_semaphore(hw);
- }
+ /* If the resource is not released by the FW/HW the SW can assume that
+ * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
+ * of the requested resource(s) while ignoring the corresponding FW/HW
+ * bits in the SW_FW_SYNC register.
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
+ if (swfw_sync & (fwmask | hwmask)) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ usleep_range(5000, 6000);
+ return 0;
}
+ /* If the resource is not released by other SW the SW can assume that
+ * the other SW malfunctions. In that case the SW should clear all SW
+ * flags that it does not own and then repeat the whole process once
+ * again.
+ */
+ if (swfw_sync & swmask) {
+ u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+
+ if (swi2c_mask)
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ixgbe_release_swfw_sync_semaphore(hw);
- usleep_range(5000, 10000);
- return 0;
+ return IXGBE_ERR_SWFW_SYNC;
}
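
The restructured acquire path reads as three stages: a bounded polling loop that takes the semaphore as soon as all owners are clear, a forced takeover when only stale FW/HW bits remain, and a clean-up-and-fail path when another software agent appears hung. A sketch of that control flow with placeholder semaphore primitives (not the ixgbe register accessors):

#include <stdbool.h>

static unsigned int sync_reg;                   /* stands in for SW_FW_SYNC */
static bool get_sem(void) { return true; }      /* placeholder REGSMP take */
static void put_sem(void) { }                   /* placeholder REGSMP give */

/* Bounded retry, forced takeover when only FW/HW bits are stuck,
 * failure when another SW agent still owns the resource.
 */
static int acquire_sync(unsigned int swmask, unsigned int fwhwmask)
{
        int i;

        for (i = 0; i < 200; i++) {
                if (!get_sem())
                        return -1;
                if (!(sync_reg & (swmask | fwhwmask))) {
                        sync_reg |= swmask;     /* free: take it */
                        put_sem();
                        return 0;
                }
                put_sem();                      /* busy: poll again */
        }

        if (!get_sem())
                return -1;
        if (!(sync_reg & swmask)) {             /* only FW/HW stuck: force */
                sync_reg |= swmask;
                put_sem();
                return 0;
        }
        put_sem();                              /* other SW hung: give up */
        return -1;
}

int main(void)
{
        return acquire_sync(0x1, 0x20);         /* acquires on the first pass */
}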
/**
@@ -630,9 +658,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
**/
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
+ u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
u32 swfw_sync;
- u32 swmask = mask;
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ swmask |= mask & IXGBE_GSSR_I2C_MASK;
ixgbe_get_swfw_sync_semaphore(hw);
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
@@ -640,7 +670,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- usleep_range(5000, 10000);
+ usleep_range(5000, 6000);
}
/**
@@ -681,6 +711,11 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
usleep_range(50, 100);
}
+ /* The SW NVM semaphore was not granted, so we do not have access
+ * to the EEPROM: release the semaphore and return an error.
+ */
+ hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
+ ixgbe_release_swfw_sync_semaphore(hw);
return IXGBE_ERR_EEPROM;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 7581da13e92a..ebe0ac950b14 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -26,6 +26,20 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ /* Start with X540 invariants, since they are so similar */
+ ixgbe_get_invariants_X540(hw);
+
+ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+ phy->ops.set_phy_power = NULL;
+
+ return 0;
+}
+
/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
* @hw: pointer to hardware structure
**/
@@ -42,6 +56,292 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
+/**
+ * ixgbe_read_cs4227 - Read CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive value read
+ *
+ * Returns status code
+ */
+static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+{
+ return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
+ value);
+}
+
+/**
+ * ixgbe_write_cs4227 - Write CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write to register
+ *
+ * Returns status code
+ */
+static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+{
+ return hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
+ value);
+}
+
+/**
+ * ixgbe_check_cs4227_reg - Perform diag on a CS4227 register
+ * @hw: pointer to hardware structure
+ * @reg: the register to check
+ *
+ * Performs a diagnostic on a register in the CS4227 chip. Returns an error
+ * if it is not operating correctly.
+ * This function assumes that the caller has acquired the proper semaphore.
+ */
+static s32 ixgbe_check_cs4227_reg(struct ixgbe_hw *hw, u16 reg)
+{
+ s32 status;
+ u32 retry;
+ u16 reg_val;
+
+ reg_val = (IXGBE_CS4227_EDC_MODE_DIAG << 1) | 1;
+ status = ixgbe_write_cs4227(hw, reg, reg_val);
+ if (status)
+ return status;
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ reg_val = 0xFFFF;
+ ixgbe_read_cs4227(hw, reg, &reg_val);
+ if (!reg_val)
+ break;
+ }
+ if (reg_val) {
+ hw_err(hw, "CS4227 reg 0x%04X failed diagnostic\n", reg);
+ return IXGBE_ERR_PHY; /* diagnostic failed; propagate an error */
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_cs4227_status - Return CS4227 status
+ * @hw: pointer to hardware structure
+ *
+ * Performs a diagnostic on the CS4227 chip. Returns an error if it is
+ * not operating correctly.
+ * This function assumes that the caller has acquired the proper semaphore.
+ */
+static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u16 value = 0;
+
+ /* Exit if the diagnostic has already been performed. */
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+ if (status)
+ return status;
+ if (value == IXGBE_CS4227_RESET_COMPLETE)
+ return 0;
+
+ /* Check port 0. */
+ status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB);
+ if (status)
+ return status;
+
+ status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB);
+ if (status)
+ return status;
+
+ /* Check port 1. */
+ status = ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_LINE_SPARE24_LSB +
+ (1 << 12));
+ if (status)
+ return status;
+
+ return ixgbe_check_cs4227_reg(hw, IXGBE_CS4227_HOST_SPARE24_LSB +
+ (1 << 12));
+}
+
+/**
+ * ixgbe_read_pe - Read register from port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive read value
+ *
+ * Returns status code
+ */
+static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+{
+ s32 status;
+
+ status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
+ if (status)
+ hw_err(hw, "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_write_pe - Write register to port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write
+ *
+ * Returns status code
+ */
+static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+{
+ s32 status;
+
+ status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
+ value);
+ if (status)
+ hw_err(hw, "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_reset_cs4227 - Reset CS4227 using port expander
+ * @hw: pointer to hardware structure
+ *
+ * This function assumes that the caller has acquired the proper semaphore.
+ * Returns error code
+ */
+static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 retry;
+ u16 value;
+ u8 reg;
+
+ /* Trigger hard reset. */
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
+ if (status)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
+ if (status)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status)
+ return status;
+
+ usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100);
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status)
+ return status;
+
+ /* Wait for the reset to complete. */
+ msleep(IXGBE_CS4227_RESET_DELAY);
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
+ &value);
+ if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK)
+ break;
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ }
+ if (retry == IXGBE_CS4227_RETRIES) {
+ hw_err(hw, "CS4227 reset did not complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+ if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+ hw_err(hw, "CS4227 EEPROM did not load successfully\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_check_cs4227 - Check CS4227 and reset as needed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ s32 status;
+ u16 value;
+ u8 retry;
+
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_err(hw, "semaphore failed with %d\n", status);
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ continue;
+ }
+
+ /* Get status of reset flow. */
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+ if (!status && value == IXGBE_CS4227_RESET_COMPLETE)
+ goto out;
+
+ if (status || value != IXGBE_CS4227_RESET_PENDING)
+ break;
+
+ /* Reset is pending. Wait and check again. */
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(IXGBE_CS4227_CHECK_DELAY);
+ }
+ /* If still pending, assume other instance failed. */
+ if (retry == IXGBE_CS4227_RETRIES) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_err(hw, "semaphore failed with %d\n", status);
+ return;
+ }
+ }
+
+ /* Reset the CS4227. */
+ status = ixgbe_reset_cs4227(hw);
+ if (status) {
+ hw_err(hw, "CS4227 reset failed: %d", status);
+ goto out;
+ }
+
+ /* The reset takes so long that we temporarily release the
+ * semaphore, in case the other driver instance is waiting for
+ * the reset indication.
+ */
+ ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_PENDING);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ usleep_range(10000, 12000);
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status) {
+ hw_err(hw, "semaphore failed with %d", status);
+ return;
+ }
+
+ /* Is the CS4227 working correctly? */
+ status = ixgbe_get_cs4227_status(hw);
+ if (status) {
+ hw_err(hw, "CS4227 status failed: %d", status);
+ goto out;
+ }
+
+ /* Record completion for next time. */
+ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_COMPLETE);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msleep(hw->eeprom.semaphore_delay);
+}
+
/** ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -54,7 +354,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
+ ixgbe_check_cs4227(hw);
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
@@ -597,6 +897,24 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets bus link width and speed to unknown because X550em is
+ * not a PCI device.
+ **/
+static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+{
+ hw->bus.type = ixgbe_bus_type_internal;
+ hw->bus.width = ixgbe_bus_width_unknown;
+ hw->bus.speed = ixgbe_bus_speed_unknown;
+
+ hw->mac.ops.set_lan_id(hw);
+
+ return 0;
+}
+
/** ixgbe_disable_rx_x550 - Disable RX unit
*
* Disables the Rx DMA unit for x550
@@ -878,6 +1196,96 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
}
/**
+ * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
+ * @hw: pointer to hardware structure
+ * @linear: true if SFP module is linear
+ */
+static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+{
+ switch (hw->phy.sfp_type) {
+ case ixgbe_sfp_type_not_present:
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+ case ixgbe_sfp_type_da_cu_core0:
+ case ixgbe_sfp_type_da_cu_core1:
+ *linear = true;
+ break;
+ case ixgbe_sfp_type_srlr_core0:
+ case ixgbe_sfp_type_srlr_core1:
+ case ixgbe_sfp_type_da_act_lmt_core0:
+ case ixgbe_sfp_type_da_act_lmt_core1:
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ case ixgbe_sfp_type_1g_lx_core0:
+ case ixgbe_sfp_type_1g_lx_core1:
+ *linear = false;
+ break;
+ case ixgbe_sfp_type_unknown:
+ case ixgbe_sfp_type_1g_cu_core0:
+ case ixgbe_sfp_type_1g_cu_core1:
+ default:
+ return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set up
+ * @autoneg_wait_to_complete: unused
+ *
+ * Configures the external PHY and the integrated KR PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ s32 status;
+ u16 slice, value;
+ bool setup_linear = false;
+
+ /* Check if SFP module is supported and linear */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: there is no reason to
+ * configure the CS4227, and an SFP-not-present error is not accepted
+ * in the setup MAC link flow.
+ */
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (status)
+ return status;
+
+ /* Configure CS4227 LINE side to 10G SR. */
+ slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12);
+ value = IXGBE_CS4227_SPEED_10G;
+ status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+ value);
+
+ /* Configure CS4227 for HOST connection rate then type. */
+ slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12);
+ value = speed & IXGBE_LINK_SPEED_10GB_FULL ?
+ IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
+ status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+ value);
+
+ slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12);
+ if (setup_linear)
+ value = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+ else
+ value = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+ status = ixgbe_write_i2c_combined_generic(hw, IXGBE_CS4227, slice,
+ value);
+
+ /* If internal link mode is XFI, then setup XFI internal link. */
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))
+ status = ixgbe_setup_ixfi_x550em(hw, &speed);
+
+ return status;
+}
+
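
The slice arithmetic used three times above follows the scheme described in the comment removed from ixgbe_setup_sfp_modules_X550em later in this patch: the CS4227 register address is a base plus a per-port offset of lan_id << 12 (for SPARE24_LSB, 0x12B0 on port 0 and 0x22B0 on port 1). A tiny self-contained check of that arithmetic, with the IXGBE_ prefix dropped from the constant name:

#include <stdint.h>
#include <stdio.h>

#define CS4227_SPARE24_LSB 0x12B0	/* base value quoted in the old comment */

static uint16_t cs4227_slice(uint16_t base, unsigned int lan_id)
{
	/* Port-pair offset: each LAN port owns a 0x1000-wide register window */
	return base + (lan_id << 12);
}

int main(void)
{
	printf("slice 0: 0x%04X\n", cs4227_slice(CS4227_SPARE24_LSB, 0)); /* 0x12B0 */
	printf("slice 1: 0x%04X\n", cs4227_slice(CS4227_SPARE24_LSB, 1)); /* 0x22B0 */
	return 0;
}
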
+/**
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -971,6 +1379,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.disable_tx_laser = NULL;
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
+ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+ mac->ops.set_rate_select_speed =
+ ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
@@ -986,53 +1398,18 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
*/
static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
- bool setup_linear;
- u16 reg_slice, edc_mode;
- s32 ret_val;
+ s32 status;
+ bool linear;
- switch (hw->phy.sfp_type) {
- case ixgbe_sfp_type_unknown:
- return 0;
- case ixgbe_sfp_type_not_present:
- return IXGBE_ERR_SFP_NOT_PRESENT;
- case ixgbe_sfp_type_da_cu_core0:
- case ixgbe_sfp_type_da_cu_core1:
- setup_linear = true;
- break;
- case ixgbe_sfp_type_srlr_core0:
- case ixgbe_sfp_type_srlr_core1:
- case ixgbe_sfp_type_da_act_lmt_core0:
- case ixgbe_sfp_type_da_act_lmt_core1:
- case ixgbe_sfp_type_1g_sx_core0:
- case ixgbe_sfp_type_1g_sx_core1:
- setup_linear = false;
- break;
- default:
- return IXGBE_ERR_SFP_NOT_SUPPORTED;
- }
+ /* Check if SFP module is supported */
+ status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
+ if (status)
+ return status;
ixgbe_init_mac_link_ops_X550em(hw);
hw->phy.ops.reset = NULL;
- /* The CS4227 slice address is the base address + the port-pair reg
- * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
- */
- reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
-
- if (setup_linear)
- edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
-
- /* Configure CS4227 for connection type. */
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
- edc_mode);
-
- if (ret_val)
- ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice,
- edc_mode);
-
- return ret_val;
+ return 0;
}
/** ixgbe_get_link_capabilities_x550em - Determines link capabilities
@@ -1240,7 +1617,7 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
if (status)
return status;
- if (lsc)
+ if (lsc && phy->ops.setup_internal_link)
return phy->ops.setup_internal_link(hw);
return 0;
@@ -1444,6 +1821,144 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
return ixgbe_enable_lasi_ext_t_x550em(hw);
}
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
+ * @hw: pointer to hardware structure
+ * @lcd_speed: pointer to lowest common link speed
+ *
+ * Determine lowest common link speed with link partner.
+ **/
+static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *lcd_speed)
+{
+ u16 an_lp_status;
+ s32 status;
+ u16 word = hw->eeprom.ctrl_word_3;
+
+ *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_lp_status);
+ if (status)
+ return status;
+
+ /* If link partner advertised 1G, return 1G */
+ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
+ *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return status;
+ }
+
+ /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
+ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
+ (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
+ return status;
+
+ /* Link partner not capable of lower speeds, return 10G */
+ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ return status;
+}
+
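
The decision above reduces to three cases. A compact standalone model follows; the two booleans abstract the link-partner status word and the per-port NVM D10GMP bit, so this is a sketch of the logic, not of the register handling:

#include <stdbool.h>
#include <stdio.h>

enum lcd { LCD_UNKNOWN, LCD_1G, LCD_10G };

static enum lcd get_lcd_model(bool lp_advertises_1g, bool d10gmp_disables_10g)
{
	if (lp_advertises_1g)
		return LCD_1G;		/* partner can do 1G: lowest common */
	if (d10gmp_disables_10g)
		return LCD_UNKNOWN;	/* 10G barred for LPLU: no valid LCD */
	return LCD_10G;			/* partner offers nothing slower */
}

int main(void)
{
	printf("%d %d %d\n",
	       get_lcd_model(true, false),	/* 1: LCD_1G */
	       get_lcd_model(false, true),	/* 0: LCD_UNKNOWN */
	       get_lcd_model(false, false));	/* 2: LCD_10G */
	return 0;
}
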
+/** ixgbe_enter_lplu_x550em - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
+ * the X557 PHY immediately prior to entering LPLU.
+ **/
+static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+{
+ u16 an_10g_cntl_reg, autoneg_reg, speed;
+ s32 status;
+ ixgbe_link_speed lcd_speed;
+ u32 save_autoneg;
+ bool link_up;
+
+ /* SW LPLU not required on later HW revisions. */
+ if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
+ return 0;
+
+ /* If blocked by MNG FW, then don't restart AN */
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status)
+ return status;
+
+ status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3,
+ &hw->eeprom.ctrl_word_3);
+ if (status)
+ return status;
+
+ /* If link is down, LPLU is disabled in the NVM, or both WoL and
+ * manageability are disabled, force link down by entering low
+ * power mode.
+ */
+ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
+ !(hw->wol_enabled || ixgbe_mng_present(hw)))
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ /* Determine LCD */
+ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
+ if (status)
+ return status;
+
+ /* If no valid LCD link speed, then force link down and exit. */
+ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+ if (status)
+ return status;
+
+ /* If no link now, speed is invalid so take link down */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status)
+ return ixgbe_set_copper_phy_power(hw, false);
+
+ /* clear everything but the speed bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
+
+ /* If current speed is already LCD, then exit. */
+ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
+ ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
+ return status;
+
+ /* Clear AN completed indication */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+ if (status)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_10g_cntl_reg);
+ if (status)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+ if (status)
+ return status;
+
+ save_autoneg = hw->phy.autoneg_advertised;
+
+ /* Set up link at the lowest common link speed */
+ status = hw->mac.ops.setup_link(hw, lcd_speed, false);
+
+ /* restore autoneg from before setting lplu speed */
+ hw->phy.autoneg_advertised = save_autoneg;
+
+ return status;
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1514,6 +2029,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
}
+ /* setup SW LPLU only for first revision */
+ if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
+ IXGBE_FUSES0_GROUP(0))))
+ phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
@@ -1752,6 +2272,62 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
}
+/**
+ * ixgbe_set_mux - Set mux for port 1 access with CS4227
+ * @hw: pointer to hardware structure
+ * @state: set mux if 1, clear if 0
+ */
+static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
+{
+ u32 esdp;
+
+ if (!hw->bus.lan_id)
+ return;
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (state)
+ esdp |= IXGBE_ESDP_SDP1;
+ else
+ esdp &= ~IXGBE_ESDP_SDP1;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and sets the I2C MUX
+ */
+static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ s32 status;
+
+ status = ixgbe_acquire_swfw_sync_X540(hw, mask);
+ if (status)
+ return status;
+
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 1);
+
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and clears the I2C MUX
+ */
+static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+{
+ if (mask & IXGBE_GSSR_I2C_MASK)
+ ixgbe_set_mux(hw, 0);
+
+ ixgbe_release_swfw_sync_X540(hw, mask);
+}
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -1760,7 +2336,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
.get_mac_addr = &ixgbe_get_mac_addr_generic, \
.get_device_caps = &ixgbe_get_device_caps_generic, \
.stop_adapter = &ixgbe_stop_adapter_generic, \
- .get_bus_info = &ixgbe_get_bus_info_generic, \
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \
.read_analog_reg8 = NULL, \
.write_analog_reg8 = NULL, \
@@ -1790,8 +2365,6 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
&ixgbe_set_source_address_pruning_X550, \
.set_ethertype_anti_spoofing = \
&ixgbe_set_ethertype_anti_spoofing_X550, \
- .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \
- .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -1809,7 +2382,10 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
.setup_sfp = NULL,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
};
static struct ixgbe_mac_operations mac_ops_X550EM_x = {
@@ -1820,8 +2396,10 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
.get_wwn_prefix = NULL,
.setup_link = NULL, /* defined later */
.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = &ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
-
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
};
#define X550_COMMON_EEP \
@@ -1855,7 +2433,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_reg = &ixgbe_read_phy_reg_generic, \
.write_reg = &ixgbe_write_phy_reg_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
- .set_phy_power = &ixgbe_set_copper_phy_power, \
+ .set_phy_power = NULL, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
@@ -1863,14 +2441,17 @@ static struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
- .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
- .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
};
static struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
+ .read_i2c_combined = &ixgbe_read_i2c_combined_generic,
+ .write_i2c_combined = &ixgbe_write_i2c_combined_generic,
+ .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
+ .write_i2c_combined_unlocked =
+ &ixgbe_write_i2c_combined_generic_unlocked,
};
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
@@ -1893,7 +2474,7 @@ struct ixgbe_info ixgbe_X550_info = {
struct ixgbe_info ixgbe_X550EM_x_info = {
.mac = ixgbe_mac_X550EM_x,
- .get_invariants = &ixgbe_get_invariants_X540,
+ .get_invariants = &ixgbe_get_invariants_X550_x,
.mac_ops = &mac_ops_X550EM_x,
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_X550EM_x,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 770e21a64388..58434584b16d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
#define IXGBE_RXDADV_SPH 0x8000
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
IXGBE_RXD_ERR_CE | \
IXGBE_RXD_ERR_LE | \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index b2f5b161d792..d3e5f5b37999 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- /* We support this operation only for 82599 and x540 at the moment */
- if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
- return IXGBEVF_82599_RETA_SIZE;
+ if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+ return IXGBEVF_X550_VFRETA_SIZE;
- return 0;
+ return IXGBEVF_82599_RETA_SIZE;
}
static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- /* We support this operation only for 82599 and x540 at the moment */
- if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
- return IXGBEVF_RSS_HASH_KEY_SIZE;
-
- return 0;
+ return IXGBEVF_RSS_HASH_KEY_SIZE;
}
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
- /* If neither indirection table nor hash key was requested - just
- * return a success avoiding taking any locks.
- */
- if (!indir && !key)
- return 0;
+ if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+ if (key)
+ memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
- spin_lock_bh(&adapter->mbx_lock);
- if (indir)
- err = ixgbevf_get_reta_locked(&adapter->hw, indir,
- adapter->num_rx_queues);
+ if (indir) {
+ int i;
- if (!err && key)
- err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+ for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+ }
+ } else {
+ /* If neither the indirection table nor the hash key was
+ * requested, just return success without taking any locks.
+ */
+ if (!indir && !key)
+ return 0;
- spin_unlock_bh(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
+ if (indir)
+ err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+ adapter->num_rx_queues);
+
+ if (!err && key)
+ err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+ }
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 775d08900949..ec3147279621 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -144,9 +144,11 @@ struct ixgbevf_ring {
#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES 2
-#define IXGBEVF_82599_RETA_SIZE 128
+#define IXGBEVF_MAX_RSS_QUEUES 2
+#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */
#define IXGBEVF_RSS_HASH_KEY_SIZE 40
+#define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -447,6 +449,9 @@ struct ixgbevf_adapter {
spinlock_t mbx_lock;
unsigned long last_reset;
+
+ u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+ u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
};
enum ixbgevf_state_t {
@@ -466,6 +471,12 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
};
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..592ff237d692 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
+#define IXGBE_RSS_L4_TYPES_MASK \
+ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u16 rss_type;
+
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+ IXGBE_RXDADV_RSSTYPE_MASK;
+
+ if (!rss_type)
+ return;
+
+ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
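
The mask test above replaces a switch over all ten RSS result types with one shift-and-AND. Reusing a subset of the constants from defines.h (IXGBE_ prefix dropped), the classification can be checked in isolation:

#include <stdio.h>

#define RSSTYPE_IPV4_TCP 0x1	/* values from the defines.h hunk above */
#define RSSTYPE_IPV4     0x2
#define RSSTYPE_IPV4_UDP 0x7

#define RSS_L4_TYPES_MASK \
	((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP))

int main(void)
{
	/* Bit rss_type of the mask says whether the hash covered L4 ports. */
	printf("ipv4-tcp hashed over L4: %d\n",
	       !!(RSS_L4_TYPES_MASK & (1ul << RSSTYPE_IPV4_TCP)));	/* 1 */
	printf("plain ipv4 hashed over L4: %d\n",
	       !!(RSS_L4_TYPES_MASK & (1ul << RSSTYPE_IPV4)));		/* 0 */
	return 0;
}
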
/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: structure containing ring specific data
@@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ ixgbevf_rx_hash(rx_ring, rx_desc, skb);
ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -649,46 +676,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
}
/**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* ixgbevf_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
}
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ixgbevf_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -765,7 +748,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
@@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
+ unsigned int pull_len;
- if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
+ if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
/* avoid re-using remote pages */
if (unlikely(ixgbevf_page_is_reserved(page)))
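
After the pull, the remaining fragment must be attached at the right offset within its page. The hunk above recovers that offset from the advanced virtual address with va & ~PAGE_MASK; a standalone check of the arithmetic, assuming 4 KiB pages and a made-up address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	/* A made-up page-aligned address plus an initial offset of 0x80. */
	uintptr_t va = 0x7f0000012000ul + 0x80;

	printf("offset before pull: 0x%lx\n",
	       (unsigned long)(va & ~PAGE_MASK));		/* 0x80 */
	printf("offset after pulling 64 bytes: 0x%lx\n",
	       (unsigned long)((va + 64) & ~PAGE_MASK));	/* 0xc0 */
	return 0;
}
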
@@ -1009,7 +1008,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_ring *ring;
- int per_ring_budget;
+ int per_ring_budget, work_done = 0;
bool clean_complete = true;
ixgbevf_for_each_ring(ring, q_vector->tx)
@@ -1028,10 +1027,12 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
- ixgbevf_for_each_ring(ring, q_vector->rx)
- clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
- per_ring_budget)
- < per_ring_budget);
+ ixgbevf_for_each_ring(ring, q_vector->rx) {
+ int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+ work_done += cleaned;
+ clean_complete &= (cleaned < per_ring_budget);
+ }
#ifdef CONFIG_NET_RX_BUSY_POLL
ixgbevf_qv_unlock_napi(q_vector);
@@ -1041,7 +1042,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
/* all work done, exit the polling mode */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->rx_itr_setting & 1)
ixgbevf_set_itr(q_vector);
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
@@ -1697,22 +1698,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 vfmrqc = 0, vfreta = 0;
- u32 rss_key[10];
u16 rss_i = adapter->num_rx_queues;
- int i, j;
+ u8 i, j;
/* Fill out hash function seeds */
- netdev_rss_key_fill(rss_key, sizeof(rss_key));
- for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+ netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+ for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
- /* Fill out redirection table */
- for (i = 0, j = 0; i < 64; i++, j++) {
+ for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
if (j == rss_i)
j = 0;
- vfreta = (vfreta << 8) | (j * 0x1);
- if ((i & 3) == 3)
+
+ adapter->rss_indir_tbl[i] = j;
+
+ vfreta |= j << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+ vfreta = 0;
+ }
}
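
The rewritten loop packs four one-byte RETA entries into each 32-bit VFRETA register (entry i lands in byte i & 3 of register i >> 2) while also caching each entry in rss_indir_tbl. A userspace model of just the packing, with rss_i = 2 queues as in the driver default:

#include <stdint.h>
#include <stdio.h>

#define RETA_SIZE 64	/* IXGBEVF_X550_VFRETA_SIZE */

int main(void)
{
	uint32_t vfreta = 0;
	unsigned int rss_i = 2;		/* number of RX queues */
	unsigned int i, j;

	for (i = 0, j = 0; i < RETA_SIZE; i++, j++) {
		if (j == rss_i)
			j = 0;
		vfreta |= j << (i & 0x3) * 8;	/* byte (i & 3) of the word */
		if ((i & 3) == 3) {
			/* stands in for IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta) */
			printf("VFRETA(%u) = 0x%08X\n", i >> 2, vfreta);
			vfreta = 0;
		}
	}
	return 0;	/* every register prints 0x01000100: queues 0,1,0,1 */
}
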
/* Perform hash on these packet types */
@@ -1890,9 +1894,17 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int flags = netdev->flags;
+ int xcast_mode;
+
+ xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
+ (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
+ IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
spin_lock_bh(&adapter->mbx_lock);
+ hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -3894,6 +3906,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
+ .ndo_features_check = passthru_features_check,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 82f44e06e5fc..340cdd469455 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -112,6 +112,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d1339b050627..427f3605cbfc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -469,6 +469,46 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @netdev: pointer to net device structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ **/
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+ struct net_device *netdev, int xcast_mode)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+ s32 err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_12:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+ msgbuf[1] = xcast_mode;
+
+ err = mbx->ops.write_posted(hw, msgbuf, 2);
+ if (err)
+ return err;
+
+ err = mbx->ops.read_posted(hw, msgbuf, 2);
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ return -EPERM;
+
+ return 0;
+}
+
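
The reply check at the end of ixgbevf_update_xcast_mode follows the usual VF mailbox convention: the PF echoes the opcode back with an ACK or NACK flag set, plus a clear-to-send bit the VF masks off before comparing. A standalone sketch; the flag values below are hypothetical stand-ins for the IXGBE_VT_MSGTYPE_* constants:

#include <stdint.h>
#include <stdio.h>

#define MSGTYPE_ACK  0x80000000u	/* hypothetical flag values */
#define MSGTYPE_NACK 0x40000000u
#define MSGTYPE_CTS  0x20000000u
#define VF_UPDATE_XCAST_MODE 0x0c	/* opcode from the mbx.h hunk above */

static int check_reply(uint32_t reply, uint32_t opcode)
{
	reply &= ~MSGTYPE_CTS;			/* CTS is informational */
	if (reply == (opcode | MSGTYPE_NACK))
		return -1;			/* PF refused the request */
	return 0;
}

int main(void)
{
	printf("%d\n", check_reply(VF_UPDATE_XCAST_MODE | MSGTYPE_ACK,
				   VF_UPDATE_XCAST_MODE));		/* 0 */
	printf("%d\n", check_reply(VF_UPDATE_XCAST_MODE | MSGTYPE_NACK |
				   MSGTYPE_CTS, VF_UPDATE_XCAST_MODE));	/* -1 */
	return 0;
}
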
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -727,6 +767,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.check_link = ixgbevf_check_mac_link_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_update_xcast_mode,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index d40f036b6df0..ef9f7736b4dc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -63,6 +63,7 @@ struct ixgbe_mac_operations {
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 6e9a792097d3..060dd3922974 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -583,7 +583,7 @@ jme_setup_tx_resources(struct jme_adapter *jme)
atomic_set(&txring->next_to_clean, 0);
atomic_set(&txring->nr_free, jme->tx_ring_size);
- txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ txring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->tx_ring_size, GFP_ATOMIC);
if (unlikely(!(txring->bufinf)))
goto err_free_txring;
@@ -592,8 +592,6 @@ jme_setup_tx_resources(struct jme_adapter *jme)
* Initialize Transmit Descriptors
*/
memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
- memset(txring->bufinf, 0,
- sizeof(struct jme_buffer_info) * jme->tx_ring_size);
return 0;
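
Both jme hunks make the same substitution: a kmalloc followed by a separate memset becomes a single kzalloc, so the buffer can never be observed with stale contents between the two steps. The userspace analogue, with a hypothetical buffer_info type standing in for struct jme_buffer_info:

#include <stdlib.h>
#include <string.h>

struct buffer_info { void *addr; unsigned int len; };	/* hypothetical */

int main(void)
{
	size_t n = 16;

	/* Old pattern: allocate, then zero in a second step. */
	struct buffer_info *a = malloc(n * sizeof(*a));
	if (a)
		memset(a, 0, n * sizeof(*a));

	/* New pattern: zeroed allocation in one call (kzalloc in the
	 * kernel, calloc here).
	 */
	struct buffer_info *b = calloc(n, sizeof(*b));

	free(a);
	free(b);
	return 0;
}
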
@@ -845,7 +843,7 @@ jme_setup_rx_resources(struct jme_adapter *jme)
rxring->next_to_use = 0;
atomic_set(&rxring->next_to_clean, 0);
- rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+ rxring->bufinf = kzalloc(sizeof(struct jme_buffer_info) *
jme->rx_ring_size, GFP_ATOMIC);
if (unlikely(!(rxring->bufinf)))
goto err_free_rxring;
@@ -853,8 +851,6 @@ jme_setup_rx_resources(struct jme_adapter *jme)
/*
* Initiallize Receive Descriptors
*/
- memset(rxring->bufinf, 0,
- sizeof(struct jme_buffer_info) * jme->rx_ring_size);
for (i = 0 ; i < jme->rx_ring_size ; ++i) {
if (unlikely(jme_make_new_rx_buf(jme, i))) {
jme_free_rx_resources(jme);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d52639bc491f..4182290fdbcf 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -759,11 +759,23 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
desc->l4i_chk = 0;
desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(dev->dev.parent, data,
- length, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
- WARN(1, "dma_map_single failed!\n");
- return -ENOMEM;
+
+ if (length <= 8 && (uintptr_t)data & 0x7) {
+ /* Copy unaligned small data fragment to TSO header data area */
+ memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+ data, length);
+ desc->buf_ptr = txq->tso_hdrs_dma
+ + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ } else {
+ /* Alignment is okay, map buffer and hand off to hardware */
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+ desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+ length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ desc->buf_ptr))) {
+ WARN(1, "dma_map_single failed!\n");
+ return -ENOMEM;
+ }
}
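
The predicate guarding the copy path is worth isolating: only fragments of at most 8 bytes whose address misses 8-byte alignment are copied into the already-mapped TSO header area; everything else is DMA-mapped as before. A runnable check of the same test:

#include <stdint.h>
#include <stdio.h>

/* Same test as in txq_put_data_tso() above */
static int copy_to_tso_hdr_area(const void *data, int length)
{
	return length <= 8 && ((uintptr_t)data & 0x7);
}

int main(void)
{
	long long storage[2];
	char *p = (char *)storage;	/* 8-byte aligned by construction */

	printf("%d\n", copy_to_tso_hdr_area(p, 8));	/* 0: aligned */
	printf("%d\n", copy_to_tso_hdr_area(p + 1, 4));	/* 1: small, misaligned */
	printf("%d\n", copy_to_tso_hdr_area(p + 1, 12));/* 0: too long, map it */
	return 0;
}
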
cmd_sts = BUFFER_OWNED_BY_DMA;
@@ -779,7 +791,8 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
}
static inline void
-txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+ u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -788,6 +801,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
int ret;
u32 cmd_csum = 0;
u16 l4i_chk = 0;
+ u32 cmd_sts;
tx_index = txq->tx_curr_desc;
desc = &txq->tx_desc_area[tx_index];
@@ -803,9 +817,17 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
desc->byte_cnt = hdr_len;
desc->buf_ptr = txq->tso_hdrs_dma +
txq->tx_curr_desc * TSO_HEADER_SIZE;
- desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+ cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
GEN_CRC;
+ /* Defer updating the first command descriptor until all
+ * following descriptors have been written.
+ */
+ if (first_desc)
+ *first_cmd_sts = cmd_sts;
+ else
+ desc->cmd_sts = cmd_sts;
+
txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -819,6 +841,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
int desc_count = 0;
struct tso_t tso;
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct tx_desc *first_tx_desc;
+ u32 first_cmd_sts = 0;
/* Count needed descriptors */
if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
@@ -826,11 +850,14 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
return -EBUSY;
}
+ first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
/* Initialize the TSO handler, and prepare the first payload */
tso_start(skb, &tso);
total_len = skb->len - hdr_len;
while (total_len > 0) {
+ bool first_desc = (desc_count == 0);
char *hdr;
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
@@ -840,7 +867,8 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
/* prepare packet headers: MAC + IP + TCP */
hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- txq_put_hdr_tso(skb, txq, data_left);
+ txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+ first_desc);
while (data_left > 0) {
int size;
@@ -860,6 +888,10 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
__skb_queue_tail(&txq->tx_skb, skb);
skb_tx_timestamp(skb);
+ /* ensure all other descriptors are written before first cmd_sts */
+ wmb();
+ first_tx_desc->cmd_sts = first_cmd_sts;
+
/* clear TX_END status */
mp->work_tx_end &= ~(1 << txq->index);
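
The ownership hand-off above is a classic producer pattern: every descriptor of the frame except the first is filled in, a write barrier orders those stores, and only then does the first descriptor's cmd_sts hand the whole chain to the hardware. A userspace model using a C11 release fence in place of wmb(), with the descriptor trimmed to the relevant field:

#include <stdatomic.h>
#include <stdint.h>

#define BUFFER_OWNED_BY_DMA 0x80000000u

struct tx_desc { uint32_t cmd_sts; };

static void publish_frame(struct tx_desc *desc, const uint32_t *cmd, int n,
			  uint32_t first_cmd_sts)
{
	int i;

	/* Fill every descriptor after the first. */
	for (i = 1; i < n; i++)
		desc[i].cmd_sts = cmd[i] | BUFFER_OWNED_BY_DMA;

	/* Order the stores above before the ownership hand-off (wmb()). */
	atomic_thread_fence(memory_order_release);
	desc[0].cmd_sts = first_cmd_sts | BUFFER_OWNED_BY_DMA;
}

int main(void)
{
	struct tx_desc d[3] = { { 0 } };
	uint32_t cmd[3] = { 0x1, 0x2, 0x3 };

	publish_frame(d, cmd, 3, cmd[0]);
	return 0;
}
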
@@ -1586,7 +1618,6 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
- drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
static int mv643xx_eth_nway_reset(struct net_device *dev)
@@ -1845,32 +1876,19 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
struct netdev_hw_addr *ha;
int i;
- if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
- int port_num;
- u32 accept;
+ if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+ goto promiscuous;
-oom:
- port_num = mp->port_num;
- accept = 0x01010101;
- for (i = 0; i < 0x100; i += 4) {
- wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
- wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
- }
- return;
- }
-
- mc_spec = kmalloc(0x200, GFP_ATOMIC);
- if (mc_spec == NULL)
- goto oom;
- mc_other = mc_spec + (0x100 >> 2);
-
- memset(mc_spec, 0, 0x100);
- memset(mc_other, 0, 0x100);
+ /* Allocate both mc_spec and mc_other tables */
+ mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
+ if (!mc_spec)
+ goto promiscuous;
+ mc_other = &mc_spec[64];
netdev_for_each_mc_addr(ha, dev) {
u8 *a = ha->addr;
u32 *table;
- int entry;
+ u8 entry;
if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
table = mc_spec;
@@ -1883,12 +1901,23 @@ oom:
table[entry >> 2] |= 1 << (8 * (entry & 3));
}
- for (i = 0; i < 0x100; i += 4) {
- wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
- wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
+ for (i = 0; i < 64; i++) {
+ wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ mc_spec[i]);
+ wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ mc_other[i]);
}
kfree(mc_spec);
+ return;
+
+promiscuous:
+ for (i = 0; i < 64; i++) {
+ wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ 0x01010101u);
+ wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+ 0x01010101u);
+ }
}
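
Both filter tables above are 64 u32 words covering 256 one-byte slots: entry N maps to bit 0 of byte N & 3 in word N >> 2, which is exactly what table[entry >> 2] |= 1 << (8 * (entry & 3)) computes. A worked example; the slot index is assumed to have been derived from the MAC address, as in the surrounding code:

#include <stdint.h>
#include <stdio.h>

static void set_filter_bit(uint32_t *table, uint8_t entry)
{
	table[entry >> 2] |= 1u << (8 * (entry & 3));
}

int main(void)
{
	uint32_t mc_spec[64] = { 0 };

	set_filter_bit(mc_spec, 0x2a);	/* e.g. slot 42 */
	printf("word %d = 0x%08X\n", 0x2a >> 2, mc_spec[0x2a >> 2]);
	/* prints: word 10 = 0x00010000 (bit 0 of byte 2) */
	return 0;
}
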
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
@@ -2788,8 +2817,10 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, pnp) {
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
- if (ret)
+ if (ret) {
+ of_node_put(pnp);
return ret;
+ }
}
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 62e48bc0cb23..a47496a020d9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -32,6 +32,7 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/cpu.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@@ -100,6 +101,8 @@
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
@@ -191,7 +194,7 @@
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
-#define MVNETA_MIB_COUNTERS_BASE 0x3080
+#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
@@ -277,6 +280,50 @@
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+struct mvneta_statistic {
+ unsigned short offset;
+ unsigned short type;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32 32
+#define T_REG_64 64
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+ { 0x3000, T_REG_64, "good_octets_received", },
+ { 0x3010, T_REG_32, "good_frames_received", },
+ { 0x3008, T_REG_32, "bad_octets_received", },
+ { 0x3014, T_REG_32, "bad_frames_received", },
+ { 0x3018, T_REG_32, "broadcast_frames_received", },
+ { 0x301c, T_REG_32, "multicast_frames_received", },
+ { 0x3050, T_REG_32, "unrec_mac_control_received", },
+ { 0x3058, T_REG_32, "good_fc_received", },
+ { 0x305c, T_REG_32, "bad_fc_received", },
+ { 0x3060, T_REG_32, "undersize_received", },
+ { 0x3064, T_REG_32, "fragments_received", },
+ { 0x3068, T_REG_32, "oversize_received", },
+ { 0x306c, T_REG_32, "jabber_received", },
+ { 0x3070, T_REG_32, "mac_receive_error", },
+ { 0x3074, T_REG_32, "bad_crc_event", },
+ { 0x3078, T_REG_32, "collision", },
+ { 0x307c, T_REG_32, "late_collision", },
+ { 0x2484, T_REG_32, "rx_discard", },
+ { 0x2488, T_REG_32, "rx_overrun", },
+ { 0x3020, T_REG_32, "frames_64_octets", },
+ { 0x3024, T_REG_32, "frames_65_to_127_octets", },
+ { 0x3028, T_REG_32, "frames_128_to_255_octets", },
+ { 0x302c, T_REG_32, "frames_256_to_511_octets", },
+ { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+ { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+ { 0x3038, T_REG_64, "good_octets_sent", },
+ { 0x3040, T_REG_32, "good_frames_sent", },
+ { 0x3044, T_REG_32, "excessive_collision", },
+ { 0x3048, T_REG_32, "multicast_frames_sent", },
+ { 0x304c, T_REG_32, "broadcast_frames_sent", },
+ { 0x3054, T_REG_32, "fc_sent", },
+ { 0x300c, T_REG_32, "internal_mac_transmit_err", },
+};
+
struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
@@ -285,23 +332,34 @@ struct mvneta_pcpu_stats {
u64 tx_bytes;
};
+struct mvneta_pcpu_port {
+ /* Pointer to the shared port */
+ struct mvneta_port *pp;
+
+ /* Pointer to the CPU-local NAPI struct */
+ struct napi_struct napi;
+
+ /* Cause of the previous interrupt */
+ u32 cause_rx_tx;
+};
+
struct mvneta_port {
+ struct mvneta_pcpu_port __percpu *ports;
+ struct mvneta_pcpu_stats __percpu *stats;
+
int pkt_size;
unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
struct net_device *dev;
-
- u32 cause_rx_tx;
- struct napi_struct napi;
+ struct notifier_block cpu_notifier;
/* Core clock */
struct clk *clk;
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- struct mvneta_pcpu_stats *stats;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -312,6 +370,8 @@ struct mvneta_port {
unsigned int speed;
unsigned int tx_csum_limit;
int use_inband_status:1;
+
+ u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -468,7 +528,7 @@ struct mvneta_rx_queue {
/* The hardware supports eight (8) rx queues, but we are only allowing
* the first one to be used. Therefore, let's just allocate one queue.
*/
-static int rxq_number = 1;
+static int rxq_number = 8;
static int txq_number = 8;
static int rxq_def;
@@ -518,6 +578,8 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
@@ -746,7 +808,6 @@ static void mvneta_port_up(struct mvneta_port *pp)
u32 q_map;
/* Enable all initialized TXs. */
- mvneta_mib_counters_clear(pp);
q_map = 0;
for (queue = 0; queue < txq_number; queue++) {
struct mvneta_tx_queue *txq = &pp->txqs[queue];
@@ -756,14 +817,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
/* Enable all initialized RXQs. */
- q_map = 0;
- for (queue = 0; queue < rxq_number; queue++) {
- struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
- if (rxq->descs != NULL)
- q_map |= (1 << queue);
- }
-
- mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+ mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
}
/* Stop the Ethernet port activity */
@@ -949,7 +1003,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
/* Set CPU queue access map - all CPUs have access to all RX
* queues and to all TX queues
*/
- for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+ for_each_present_cpu(cpu)
mvreg_write(pp, MVNETA_CPU_MAP(cpu),
(MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
@@ -1030,6 +1084,8 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_INTR_ENABLE,
(MVNETA_RXQ_INTR_ENABLE_ALL_MASK
| MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+
+ mvneta_mib_counters_clear(pp);
}
/* Set max sizes for tx queues */
@@ -1426,17 +1482,6 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
return MVNETA_TX_L4_CSUM_NOT;
}
-/* Returns rx queue pointer (find last set bit) according to causeRxTx
- * value
- */
-static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
- u32 cause)
-{
- int queue = fls(cause >> 8) - 1;
-
- return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
-}
-
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq)
@@ -1461,6 +1506,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
+ struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -1479,6 +1525,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
struct sk_buff *skb;
unsigned char *data;
+ dma_addr_t phys_addr;
u32 rx_status;
int rx_bytes, err;
@@ -1486,6 +1533,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
+ phys_addr = rx_desc->buf_phys_addr;
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
@@ -1513,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&pp->napi, skb);
+ napi_gro_receive(&port->napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -1534,7 +1582,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
if (!skb)
goto err_drop_frame;
- dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
+ dma_unmap_single(dev->dev.parent, phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
rcvd_pkts++;
@@ -1548,7 +1596,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&pp->napi, skb);
+ napi_gro_receive(&port->napi, skb);
}
if (rcvd_pkts) {
@@ -2059,12 +2107,10 @@ static void mvneta_set_rx_mode(struct net_device *dev)
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
- struct mvneta_port *pp = (struct mvneta_port *)dev_id;
-
- /* Mask all interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
- napi_schedule(&pp->napi);
+ disable_percpu_irq(port->pp->dev->irq);
+ napi_schedule(&port->napi);
return IRQ_HANDLED;
}
@@ -2102,11 +2148,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
{
int rx_done = 0;
u32 cause_rx_tx;
- unsigned long flags;
struct mvneta_port *pp = netdev_priv(napi->dev);
+ struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
if (!netif_running(pp->dev)) {
- napi_complete(napi);
+ napi_complete(&port->napi);
return rx_done;
}
@@ -2133,47 +2179,17 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
- cause_rx_tx |= pp->cause_rx_tx;
- if (rxq_number > 1) {
- while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
- int count;
- struct mvneta_rx_queue *rxq;
- /* get rx queue number from cause_rx_tx */
- rxq = mvneta_rx_policy(pp, cause_rx_tx);
- if (!rxq)
- break;
-
- /* process the packet in that rx queue */
- count = mvneta_rx(pp, budget, rxq);
- rx_done += count;
- budget -= count;
- if (budget > 0) {
- /* set off the rx bit of the
- * corresponding bit in the cause rx
- * tx register, so that next iteration
- * will find the next rx queue where
- * packets are received on
- */
- cause_rx_tx &= ~((1 << rxq->id) << 8);
- }
- }
- } else {
- rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
- budget -= rx_done;
- }
+ cause_rx_tx |= port->cause_rx_tx;
+ rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+ budget -= rx_done;
if (budget > 0) {
cause_rx_tx = 0;
- napi_complete(napi);
- local_irq_save(flags);
- mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number) |
- MVNETA_TX_INTR_MASK(txq_number) |
- MVNETA_MISCINTR_INTR_MASK);
- local_irq_restore(flags);
+ napi_complete(&port->napi);
+ enable_percpu_irq(pp->dev->irq, 0);
}
- pp->cause_rx_tx = cause_rx_tx;
+ port->cause_rx_tx = cause_rx_tx;
return rx_done;
}
@@ -2377,26 +2393,19 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp)
/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
- int queue;
-
- for (queue = 0; queue < rxq_number; queue++)
- mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+ mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
- int queue;
-
- for (queue = 0; queue < rxq_number; queue++) {
- int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
- if (err) {
- netdev_err(pp->dev, "%s: can't create rxq=%d\n",
- __func__, queue);
- mvneta_cleanup_rxqs(pp);
- return err;
- }
+ int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
+ if (err) {
+ netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+ __func__, rxq_def);
+ mvneta_cleanup_rxqs(pp);
+ return err;
}
return 0;
@@ -2422,6 +2431,8 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
static void mvneta_start_dev(struct mvneta_port *pp)
{
+ unsigned int cpu;
+
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2429,7 +2440,11 @@ static void mvneta_start_dev(struct mvneta_port *pp)
mvneta_port_enable(pp);
/* Enable polling on the port */
- napi_enable(&pp->napi);
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ napi_enable(&port->napi);
+ }
/* Unmask interrupts */
mvreg_write(pp, MVNETA_INTR_NEW_MASK,
@@ -2447,9 +2462,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
static void mvneta_stop_dev(struct mvneta_port *pp)
{
+ unsigned int cpu;
+
phy_stop(pp->phy_dev);
- napi_disable(&pp->napi);
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ napi_disable(&port->napi);
+ }
netif_carrier_off(pp->dev);
@@ -2689,6 +2710,125 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
pp->phy_dev = NULL;
}
+static void mvneta_percpu_enable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ disable_percpu_irq(pp->dev->irq);
+}
+
+static void mvneta_percpu_elect(struct mvneta_port *pp)
+{
+ int online_cpu_idx, cpu, i = 0;
+
+ online_cpu_idx = rxq_def % num_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ if (i == online_cpu_idx)
+ /* Enable per-CPU interrupt on the one CPU we
+ * just elected
+ */
+ smp_call_function_single(cpu, mvneta_percpu_enable,
+ pp, true);
+ else
+ /* Disable per-CPU interrupt on all the other CPUs */
+ smp_call_function_single(cpu, mvneta_percpu_disable,
+ pp, true);
+ i++;
+ }
+}
+
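
The election rule itself is a single modulo: the CPU whose position in the online list equals rxq_def % num_online_cpus() gets the per-CPU RX interrupt enabled, everyone else gets it disabled. Sketched in isolation:

#include <stdio.h>

int main(void)
{
	int rxq_def = 0, num_online = 4;
	int elected = rxq_def % num_online;
	int cpu;

	for (cpu = 0; cpu < num_online; cpu++)
		printf("cpu %d: rx irq %s\n", cpu,
		       cpu == elected ? "enabled" : "disabled");
	return 0;
}
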
+static int mvneta_percpu_notifier(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
+ cpu_notifier);
+ int cpu = (unsigned long)hcpu, other_cpu;
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ netif_tx_stop_all_queues(pp->dev);
+
+ /* We have to synchronize on the napi of each CPU
+ * except the one just being woken up
+ */
+ for_each_online_cpu(other_cpu) {
+ if (other_cpu != cpu) {
+ struct mvneta_pcpu_port *other_port =
+ per_cpu_ptr(pp->ports, other_cpu);
+
+ napi_synchronize(&other_port->napi);
+ }
+ }
+
+ /* Mask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ napi_enable(&port->napi);
+
+ /* Enable per-CPU interrupt on the one CPU we care
+ * about.
+ */
+ mvneta_percpu_elect(pp);
+
+ /* Unmask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ netif_tx_stop_all_queues(pp->dev);
+ /* Mask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+ napi_synchronize(&port->napi);
+ napi_disable(&port->napi);
+ /* Disable per-CPU interrupts on the CPU that is
+ * brought down.
+ */
+ smp_call_function_single(cpu, mvneta_percpu_disable,
+ pp, true);
+
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ /* Check if a new CPU must be elected now that this one is down */
+ mvneta_percpu_elect(pp);
+ /* Unmask all ethernet port interrupts */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK(rxq_number) |
+ MVNETA_TX_INTR_MASK(txq_number) |
+ MVNETA_MISCINTR_INTR_MASK);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+ MVNETA_CAUSE_PHY_STATUS_CHANGE |
+ MVNETA_CAUSE_LINK_CHANGE |
+ MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ netif_tx_start_all_queues(pp->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int mvneta_open(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -2707,13 +2847,29 @@ static int mvneta_open(struct net_device *dev)
goto err_cleanup_rxqs;
/* Connect to port interrupt line */
- ret = request_irq(pp->dev->irq, mvneta_isr, 0,
- MVNETA_DRIVER_NAME, pp);
+ ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
+ MVNETA_DRIVER_NAME, pp->ports);
if (ret) {
netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
goto err_cleanup_txqs;
}
+ /* Even though the documentation says that request_percpu_irq
+ * doesn't enable the interrupts automatically, it actually
+ * does so on the local CPU.
+ *
+ * Make sure it's disabled.
+ */
+ mvneta_percpu_disable(pp);
+
+ /* Elect a CPU to handle our RX queue interrupt */
+ mvneta_percpu_elect(pp);
+
+ /* Register a CPU notifier to handle the case where our CPU
+ * might be taken offline.
+ */
+ register_cpu_notifier(&pp->cpu_notifier);
+
/* By default the link is down */
netif_carrier_off(pp->dev);
@@ -2728,7 +2884,7 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_irq:
- free_irq(pp->dev->irq, pp);
+ free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
@@ -2740,10 +2896,14 @@ err_cleanup_rxqs:
static int mvneta_stop(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
+ int cpu;
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
- free_irq(dev->irq, pp);
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ for_each_present_cpu(cpu)
+ smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+ free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
@@ -2873,6 +3033,65 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return 0;
}
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ }
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+ const struct mvneta_statistic *s;
+ void __iomem *base = pp->base;
+ u32 high, low;
+ u64 val;
+ int i;
+
+ for (i = 0, s = mvneta_statistics;
+ s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+ s++, i++) {
+ val = 0;
+
+ switch (s->type) {
+ case T_REG_32:
+ val = readl_relaxed(base + s->offset);
+ break;
+ case T_REG_64:
+ /* Docs say to read low 32-bit then high */
+ low = readl_relaxed(base + s->offset);
+ high = readl_relaxed(base + s->offset + 4);
+ val = (u64)high << 32 | low;
+ break;
+ }
+
+ pp->ethtool_stats[i] += val;
+ }
+}
+
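
For the T_REG_64 counters the low word is read before the high word, matching the documented ordering (presumably the controller latches the high half on the low read so the pair forms one consistent snapshot), and the combined value must be held in a 64-bit variable. A standalone model of the read:

#include <stdint.h>
#include <stdio.h>

static uint64_t read_counter64(const volatile uint32_t *reg)
{
	uint32_t low  = reg[0];	/* readl_relaxed(base + s->offset)     */
	uint32_t high = reg[1];	/* readl_relaxed(base + s->offset + 4) */

	return (uint64_t)high << 32 | low;
}

int main(void)
{
	uint32_t fake[2] = { 0xdeadbeef, 0x1 };

	printf("0x%llx\n", (unsigned long long)read_counter64(fake));
	/* prints 0x1deadbeef */
	return 0;
}
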
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int i;
+
+ mvneta_ethtool_update_stats(pp);
+
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+ *data++ = pp->ethtool_stats[i];
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(mvneta_statistics);
+ return -EOPNOTSUPP;
+}
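The three callbacks just added form one contract: get_sset_count() sizes the buffers that the ethtool core then hands to get_strings() and get_ethtool_stats(), so all three must walk the same table. A standalone sketch of that shape (hypothetical stat table, not the driver's):

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32			/* mirrors ETH_GSTRING_LEN */

static const char demo_stats[][GSTRING_LEN] = {
	"rx_packets", "tx_packets",
};
#define DEMO_COUNT (sizeof(demo_stats) / sizeof(demo_stats[0]))

int main(void)
{
	char strings[DEMO_COUNT * GSTRING_LEN];
	unsigned long long data[DEMO_COUNT] = { 1234, 5678 };
	size_t i;

	/* same copy get_strings() performs into the core's buffer */
	for (i = 0; i < DEMO_COUNT; i++)
		memcpy(strings + i * GSTRING_LEN, demo_stats[i], GSTRING_LEN);

	for (i = 0; i < DEMO_COUNT; i++)
		printf("%-*s %llu\n", GSTRING_LEN,
		       strings + i * GSTRING_LEN, data[i]);
	return 0;
}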
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -2894,6 +3113,9 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
.get_drvinfo = mvneta_ethtool_get_drvinfo,
.get_ringparam = mvneta_ethtool_get_ringparam,
.set_ringparam = mvneta_ethtool_set_ringparam,
+ .get_strings = mvneta_ethtool_get_strings,
+ .get_ethtool_stats = mvneta_ethtool_get_stats,
+ .get_sset_count = mvneta_ethtool_get_sset_count,
};
/* Initialize hw */
@@ -3027,17 +3249,10 @@ static int mvneta_probe(struct platform_device *pdev)
const char *dt_mac_addr;
char hw_mac_addr[ETH_ALEN];
const char *mac_from;
+ const char *managed;
int phy_mode;
- int fixed_phy = 0;
int err;
-
- /* Our multiqueue support is not complete, so for now, only
- * allow the usage of the first RX queue
- */
- if (rxq_def != 0) {
- dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
- return -EINVAL;
- }
+ int cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
if (!dev)
@@ -3062,7 +3277,6 @@ static int mvneta_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot register fixed PHY\n");
goto err_free_irq;
}
- fixed_phy = 1;
/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
@@ -3086,8 +3300,11 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
- pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
- fixed_phy;
+
+ err = of_property_read_string(dn, "managed", &managed);
+ pp->use_inband_status = (err == 0 &&
+ strcmp(managed, "in-band-status") == 0);
+ pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
@@ -3104,11 +3321,18 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_clk;
}
+ /* Alloc per-cpu port structure */
+ pp->ports = alloc_percpu(struct mvneta_pcpu_port);
+ if (!pp->ports) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
/* Alloc per-cpu stats */
pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
- goto err_clk;
+ goto err_free_ports;
}
dt_mac_addr = of_get_mac_address(dn);
@@ -3149,7 +3373,12 @@ static int mvneta_probe(struct platform_device *pdev)
if (dram_target_info)
mvneta_conf_mbus_windows(pp, dram_target_info);
- netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ for_each_present_cpu(cpu) {
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+ netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ port->pp = pp;
+ }
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
@@ -3172,12 +3401,16 @@ static int mvneta_probe(struct platform_device *pdev)
struct phy_device *phy = of_phy_find_device(dn);
mvneta_fixed_link_update(pp, phy);
+
+ put_device(&phy->dev);
}
return 0;
err_free_stats:
free_percpu(pp->stats);
+err_free_ports:
+ free_percpu(pp->ports);
err_clk:
clk_disable_unprepare(pp->clk);
err_put_phy_node:
@@ -3197,6 +3430,7 @@ static int mvneta_remove(struct platform_device *pdev)
unregister_netdev(dev);
clk_disable_unprepare(pp->clk);
+ free_percpu(pp->ports);
free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
of_node_put(pp->phy_node);
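Taken together, the mvneta changes above revolve around one pattern: alloc_percpu() returns a __percpu cookie, per_cpu_ptr() resolves it to each CPU's instance for setup and teardown, and free_percpu() releases every instance at once. A condensed, hypothetical sketch of that lifecycle:

#include <linux/percpu.h>
#include <linux/netdevice.h>

struct demo_pcpu_port {
	struct napi_struct napi;
	void *priv;
};

static struct demo_pcpu_port __percpu *
demo_ports_init(struct net_device *dev,
		int (*poll)(struct napi_struct *, int))
{
	struct demo_pcpu_port __percpu *ports;
	int cpu;

	ports = alloc_percpu(struct demo_pcpu_port);
	if (!ports)
		return NULL;

	for_each_present_cpu(cpu) {
		struct demo_pcpu_port *port = per_cpu_ptr(ports, cpu);

		netif_napi_add(dev, &port->napi, poll, NAPI_POLL_WEIGHT);
		port->priv = netdev_priv(dev);
	}
	return ports;			/* released with free_percpu() */
}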
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -299,6 +301,7 @@
/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
u64 tx_bytes;
};
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+};
+
struct mvpp2_port {
u8 id;
@@ -679,6 +690,9 @@ struct mvpp2_port {
u32 pending_cause_rx;
struct napi_struct napi;
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
/* Flags */
unsigned long flags;
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
/* Array of transmitted skb */
struct sk_buff **tx_skb;
+ /* Array of transmitted buffers' physical addresses */
+ dma_addr_t *tx_buffs;
+
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
/* Occupied buffers indicator */
atomic_t in_use;
int in_use_thresh;
-
- spinlock_t lock;
};
struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct mvpp2_tx_desc *tx_desc)
{
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+ if (skb)
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+ tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
atomic_set(&bm_pool->in_use, 0);
- spin_lock_init(&bm_pool->lock);
return 0;
}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
int pkt_size)
{
- unsigned long flags = 0;
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
return NULL;
}
- spin_lock_irqsave(&new_pool->lock, flags);
-
if (new_pool->type == MVPP2_BM_FREE)
new_pool->type = type;
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num);
- /* We need to undo the bufs_add() allocations */
- spin_unlock_irqrestore(&new_pool->lock, flags);
return NULL;
}
}
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
- spin_unlock_irqrestore(&new_pool->lock, flags);
-
return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- unsigned long flags = 0;
int rxq;
if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_long)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_long->lock, flags);
port->pool_long->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_long->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_short)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_short->lock, flags);
port->pool_short->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_short->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
(MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
rxq->time_coal = usec;
}
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
- struct mvpp2_port *port = arg;
- int queue;
- u32 val;
-
- for (queue = 0; queue < txq_number; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
- MVPP2_TRANSMITTED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
- }
-}
-
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
int i;
for (i = 0; i < num; i++) {
- struct mvpp2_tx_desc *tx_desc = txq->descs +
- txq_pcpu->txq_get_index;
+ dma_addr_t buf_phys_addr =
+ txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
if (!skb)
continue;
- dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
+ dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
u32 cause)
{
- int queue = fls(cause >> 16) - 1;
+ int queue = fls(cause) - 1;
return port->txqs[queue];
}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
netif_tx_wake_queue(nq);
}
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
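The loop above is the standard highest-bit-first walk over a cause bitmap: fls() returns the 1-based index of the most significant set bit, so each pass services that queue and clears its bit. A standalone C equivalent (emulating the kernel's fls() with a GCC builtin):

#include <stdio.h>

static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int cause = 0x15;	/* queues 0, 2 and 4 pending */

	while (cause) {
		int queue = fls(cause) - 1;

		printf("servicing txq %d\n", queue);
		cause &= ~(1u << queue);
	}
	return 0;
}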
+
/* Rx/Tx queue initialization/cleanup methods */
/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
sizeof(*txq_pcpu->tx_skb),
GFP_KERNEL);
- if (!txq_pcpu->tx_skb) {
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
- return -ENOMEM;
- }
+ if (!txq_pcpu->tx_skb)
+ goto error;
+
+ txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!txq_pcpu->tx_buffs)
+ goto error;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
}
return 0;
+
+error:
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
+ }
+
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
}
if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
goto err_cleanup;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
return 0;
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
}
}
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+ ktime_t interval;
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ unsigned int tx_todo, cause;
+
+ if (!netif_running(dev))
+ return;
+ port_pcpu->timer_scheduled = false;
+
+ /* Process all the Tx queues */
+ cause = (1 << txq_number) - 1;
+ tx_todo = mvpp2_tx_done(port, cause);
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo)
+ mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+ return HRTIMER_NORESTART;
+}
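These two callbacks split the deferral in half: the hrtimer fires in hard-irq context, where only the cheap tasklet_schedule() is allowed, and the tasklet does the real TX-done walk, re-arming the timer if work remains. A condensed sketch of the pattern, with hypothetical demo_* names (do_pending_work() is a stand-in for mvpp2_tx_done()):

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

struct demo_pcpu {
	struct hrtimer timer;
	struct tasklet_struct task;
	bool scheduled;
};

static unsigned int do_pending_work(void)
{
	return 0;			/* stand-in for mvpp2_tx_done() */
}

static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
{
	struct demo_pcpu *p = container_of(t, struct demo_pcpu, timer);

	tasklet_schedule(&p->task);	/* get out of hard-irq context */
	return HRTIMER_NORESTART;
}

static void demo_task_cb(unsigned long data)
{
	struct demo_pcpu *p = (struct demo_pcpu *)data;

	p->scheduled = false;
	if (do_pending_work() && !p->scheduled) {
		p->scheduled = true;
		hrtimer_start(&p->timer, ktime_set(0, 1000000UL),
			      HRTIMER_MODE_REL_PINNED);
	}
}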
+
/* Main RX/TX processing routines */
/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
}
}
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
dev_kfree_skb_any(skb);
}
+ /* Finalize TX processing */
+ if (txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+ mvpp2_timer_set(port_pcpu);
+ }
+
return NETDEV_TX_OK;
}
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
netdev_err(dev, "tx fifo underrun error\n");
}
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- struct mvpp2_port *port = arg;
- u32 cause_rx_tx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
/* Rx/Tx cause register
*
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
*/
cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- /* Release TX descriptors */
- if (cause_tx) {
- struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
- if (txq_pcpu->count)
- mvpp2_txq_done(port, txq, txq_pcpu);
- }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
- u32 cause_rx_tx, cause_rx;
- int rx_done = 0;
- struct mvpp2_port *port = netdev_priv(napi->dev);
-
- on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
/* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
mvpp2_stop_dev(port);
mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
on_each_cpu(mvpp2_interrupts_mask, port, 1);
free_irq(port->irq, port);
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
txq->done_pkts_coal = c->tx_max_coalesced_frames;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
return 0;
}
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
{
struct device_node *phy_node;
struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
struct net_device *dev;
struct resource *res;
const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
int features;
int phy_mode;
int priv_common_regs_num = 2;
- int err, i;
+ int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
mvpp2_port_power_up(port);
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+
+ tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+ (unsigned long)dev);
+ }
+
netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_txq_pcpu;
+ goto err_free_port_pcpu;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
priv->port_list[id] = port;
return 0;
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
err_free_txq_pcpu:
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d9f4498832a1..5606a043063e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4819,6 +4819,18 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
ETH_ALEN);
+ /* if the address is invalid, use a random value */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ struct sockaddr sa = { AF_UNSPEC };
+
+ netdev_warn(dev,
+ "Invalid MAC address, defaulting to random\n");
+ eth_hw_addr_random(dev);
+ memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
+ if (sky2_set_mac_address(dev, &sa))
+ netdev_warn(dev, "Failed to set MAC address.\n");
+ }
+
return dev;
}
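The fallback above mirrors what is_valid_ether_addr()/eth_hw_addr_random() do: reject an all-zero or multicast address and substitute a unicast, locally-administered random one. A standalone C demo of the two checks and the bit fix-ups (assumed illustration, not the kernel helpers themselves):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int valid_ether_addr(const uint8_t *a)
{
	static const uint8_t zero[6];

	/* multicast bit must be clear, and all-zero is reserved */
	return !(a[0] & 0x01) && memcmp(a, zero, 6) != 0;
}

static void random_ether_addr(uint8_t *a)
{
	int i;

	for (i = 0; i < 6; i++)
		a[i] = rand() & 0xff;
	a[0] &= ~0x01;			/* clear multicast bit */
	a[0] |= 0x02;			/* set locally-administered bit */
}

int main(void)
{
	uint8_t mac[6] = { 0 };		/* pretend the EEPROM read zeros */

	if (!valid_ether_addr(mac))
		random_ether_addr(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}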
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 52a6665b7abf..d54701047401 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX
source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 38fe32ef5e5f..2e2a5ec509ac 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0a3202047569..2177e56ed0be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2398,7 +2398,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
}
}
- memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 63769df872a4..eb8a4988de63 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -100,7 +100,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
{
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
- char name[25];
int timestamp_en = 0;
bool assigned_eq = false;
@@ -119,8 +118,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
err = mlx4_assign_eq(mdev->dev, priv->port,
&cq->vector);
if (err) {
- mlx4_err(mdev, "Failed assigning an EQ to %s\n",
- name);
+ mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
+ cq->vector);
goto free_eq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 99ba1c50e585..ddb5541882f5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -95,13 +95,11 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
(u16) (mdev->dev->caps.fw_ver & 0xffff));
strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
"blueflame",
+ "phv-bit"
};
static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1795,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+ bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+ bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
int i;
+ int ret = 0;
- if (bf_enabled_new == bf_enabled_old)
- return 0; /* Nothing to do */
+ if (bf_enabled_new != bf_enabled_old) {
+ if (bf_enabled_new) {
+ bool bf_supported = true;
- if (bf_enabled_new) {
- bool bf_supported = true;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ bf_supported &= priv->tx_ring[i]->bf_alloced;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ if (!bf_supported) {
+ en_err(priv, "BlueFlame is not supported\n");
+ return -EINVAL;
+ }
- if (!bf_supported) {
- en_err(priv, "BlueFlame is not supported\n");
- return -EINVAL;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+ } else {
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- } else {
- priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
- }
-
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (i = 0; i < priv->tx_ring_num; i++)
+ priv->tx_ring[i]->bf_enabled = bf_enabled_new;
- en_info(priv, "BlueFlame %s\n",
- bf_enabled_new ? "Enabled" : "Disabled");
+ en_info(priv, "BlueFlame %s\n",
+ bf_enabled_new ? "Enabled" : "Disabled");
+ }
+ if (phv_enabled_new != phv_enabled_old) {
+ ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+ if (ret)
+ return ret;
+ else if (phv_enabled_new)
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ else
+ priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+ en_info(priv, "PHV bit %s\n",
+ phv_enabled_new ? "Enabled" : "Disabled");
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 913b716ed2e1..a946e4bf71d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -224,6 +224,26 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
kfree(mdev);
}
+static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
+{
+ int i;
+ struct mlx4_en_dev *mdev = ctx;
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+ mdev->pndev[i] = NULL;
+ }
+
+ /* register notifier */
+ mdev->nb.notifier_call = mlx4_en_netdev_event;
+ if (register_netdevice_notifier(&mdev->nb)) {
+ mdev->nb.notifier_call = NULL;
+ mlx4_err(mdev, "Failed to create notifier\n");
+ }
+}
+
static void *mlx4_en_add(struct mlx4_dev *dev)
{
struct mlx4_en_dev *mdev;
@@ -297,21 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mutex_init(&mdev->state_lock);
mdev->device_up = true;
- /* Setup ports */
-
- /* Create a netdev for each port */
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- mlx4_info(mdev, "Activating port:%d\n", i);
- if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
- mdev->pndev[i] = NULL;
- }
- /* register notifier */
- mdev->nb.notifier_call = mlx4_en_netdev_event;
- if (register_netdevice_notifier(&mdev->nb)) {
- mdev->nb.notifier_call = NULL;
- mlx4_err(mdev, "Failed to create notifier\n");
- }
-
return mdev;
err_mr:
@@ -335,6 +340,7 @@ static struct mlx4_interface mlx4_en_interface = {
.event = mlx4_en_event,
.get_dev = mlx4_en_get_netdev,
.protocol = MLX4_PROT_ETH,
+ .activate = mlx4_en_activate,
};
static void mlx4_en_verify_params(void)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd1ce12..886e1bc86374 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -573,10 +573,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
- struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
- u64 reg_id = 0;
int *qpn = &priv->base_qpn;
u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
@@ -600,44 +598,11 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");
- goto qp_err;
- }
-
- err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
- &priv->tunnel_reg_id);
- if (err)
- goto tunnel_err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
}
- memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
- memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
- entry->reg_id = reg_id;
-
- hlist_add_head_rcu(&entry->hlist,
- &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
return 0;
-
-alloc_err:
- if (priv->tunnel_reg_id)
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-tunnel_err:
- mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, priv->port, mac);
- return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
@@ -645,39 +610,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int qpn = priv->base_qpn;
- u64 mac;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
} else {
- struct mlx4_mac_entry *entry;
- struct hlist_node *tmp;
- struct hlist_head *bucket;
- unsigned int i;
-
- for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
- bucket = &priv->mac_hash[i];
- hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
- en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
- entry->mac);
- mlx4_en_uc_steer_release(priv, entry->mac,
- qpn, entry->reg_id);
-
- mlx4_unregister_mac(dev, priv->port, mac);
- hlist_del_rcu(&entry->hlist);
- kfree_rcu(entry, rcu);
- }
- }
-
- if (priv->tunnel_reg_id) {
- mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
- priv->tunnel_reg_id = 0;
- }
-
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
@@ -1283,6 +1222,75 @@ static void mlx4_en_netpoll(struct net_device *dev)
}
#endif
+static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 reg_id;
+ int err = 0;
+ int *qpn = &priv->base_qpn;
+ struct mlx4_mac_entry *entry;
+
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ return err;
+
+ err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+ &priv->tunnel_reg_id);
+ if (err)
+ goto tunnel_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
+ entry->reg_id = reg_id;
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ if (priv->tunnel_reg_id)
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+
+tunnel_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+ return err;
+}
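Factoring these rules out preserves the kernel's usual unwind-ladder shape: each label undoes exactly the steps that succeeded before the failure, in reverse order. The generic skeleton, with hypothetical step_*() helpers:

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_step_a(void) { }
static void undo_step_b(void) { }

static int demo_setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto undo_a;
	err = step_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
	return err;
}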
+
+static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+ u64 mac;
+ unsigned int i;
+ int qpn = priv->base_qpn;
+ struct hlist_head *bucket;
+ struct hlist_node *tmp;
+ struct mlx4_mac_entry *entry;
+
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+ mac = mlx4_mac_to_u64(entry->mac);
+ en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
+ entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+
+ mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ }
+ }
+
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+}
+
static void mlx4_en_tx_timeout(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1684,6 +1692,11 @@ int mlx4_en_start_port(struct net_device *dev)
goto tx_err;
}
+ /* Set Unicast and VXLAN steering rules */
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
+ mlx4_en_set_rss_steer_rules(priv))
+ mlx4_warn(mdev, "Failed setting steering rules\n");
+
/* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1831,6 +1844,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
for (i = 0; i < priv->tx_ring_num; i++)
mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ mlx4_en_delete_rss_steer_rules(priv);
+
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
@@ -2184,6 +2200,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = en_priv->mdev;
+
+ /* Since there is no support for enabling/disabling RX C-TAG and
+ * S-TAG VLAN acceleration separately, make sure the S-TAG flag is
+ * always in the same state as the C-TAG flag.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+ !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ else
+ features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ return features;
+}
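ndo_fix_features runs before requested features are committed, which is the right place to force coupled flags into agreement. Reduced to plain C with hypothetical flag bits, the rule above is just:

#define F_CTAG_RX	(1u << 0)	/* stand-in for NETIF_F_HW_VLAN_CTAG_RX */
#define F_STAG_RX	(1u << 1)	/* stand-in for NETIF_F_HW_VLAN_STAG_RX */

static unsigned int demo_fix_features(unsigned int features,
				      int hw_skips_outer_vlan)
{
	/* S-TAG stripping must mirror C-TAG stripping unless the
	 * device skips the outer VLAN entirely. */
	if ((features & F_CTAG_RX) && !hw_skips_outer_vlan)
		features |= F_STAG_RX;
	else
		features &= ~F_STAG_RX;
	return features;
}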
+
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2218,6 +2253,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
+ if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+ en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+ (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
en_info(priv, "Turn %s loopback\n",
(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2499,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2500,6 +2540,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_poll_controller = mlx4_en_netpoll,
#endif
.ndo_set_features = mlx4_en_set_features,
+ .ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
@@ -2775,7 +2816,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
- u64 mac_u64;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2867,17 +2907,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
- if (mlx4_is_slave(priv->mdev->dev)) {
- eth_hw_addr_random(dev);
- en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
- mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
- mdev->dev->caps.def_mac[priv->port] = mac_u64;
- } else {
- en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
- priv->port, dev->dev_addr);
- err = -EINVAL;
- goto out;
- }
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ priv->port, dev->dev_addr);
+ err = -EINVAL;
+ goto out;
+ } else if (mlx4_is_slave(priv->mdev->dev) &&
+ (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
+ /* A random MAC was assigned in mlx4_slave_cap()
+ * in the mlx4_core module.
+ */
+ dev->addr_assign_type |= NET_ADDR_RANDOM;
+ en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
}
memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
@@ -2931,6 +2971,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ }
+
+ if (mlx4_is_slave(mdev->dev)) {
+ int phv;
+
+ err = get_phv_bit(mdev->dev, port, &phv);
+ if (!err && phv) {
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+ }
+ } else {
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+ dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c145dddd717..e7a5000aa12c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -725,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
- if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
!(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
hdr += sizeof(struct vlan_hdr);
@@ -906,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->csum_level = 1;
if ((cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+ } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ __vlan_hwaccel_put_tag(gro_skb,
+ htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
}
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(gro_skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
skb_record_rx_queue(gro_skb, cq->ring);
skb_mark_napi_id(gro_skb, &cq->napi);
@@ -962,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
- PKT_HASH_TYPE_L3);
+ (ip_summed == CHECKSUM_UNNECESSARY) ?
+ PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_L3);
if ((be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK) &&
+ MLX4_CQE_CVLAN_PRESENT_MASK) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+ else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_SVLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+ be16_to_cpu(cqe->sl_vid));
if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1032,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
- int cpu_curr;
const struct cpumask *aff;
+ struct irq_data *idata;
+ int cpu_curr;
INC_PERF_COUNTER(priv->pstats.napi_quota);
cpu_curr = smp_processor_id();
- aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+ idata = irq_desc_get_irq_data(cq->irq_desc);
+ aff = irq_data_get_affinity_mask(idata);
if (likely(cpumask_test_cpu(cpu_curr, aff)))
return budget;
@@ -1065,7 +1082,10 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+ /* VLAN_HLEN is added twice, to support skbs VLAN-tagged with
+ * multiple headers (for example ETH_P_8021Q and ETH_P_8021AD).
+ */
+ int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
int buf_size = 0;
int i = 0;
@@ -1250,8 +1270,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
memcpy(rss_context->rss_key, priv->rss_key,
MLX4_EN_RSS_KEY_SIZE);
- netdev_rss_key_fill(rss_context->rss_key,
- MLX4_EN_RSS_KEY_SIZE);
} else {
en_err(priv, "Unknown RSS hash function requested\n");
err = -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f6ad96..4421bf5463f6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
+ u16 vlan_proto = 0;
int i_frag;
int lso_header_size;
void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
}
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = skb_vlan_tag_get(skb);
-
+ vlan_proto = be16_to_cpu(skb->vlan_proto);
+ }
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -958,8 +960,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->bf.offset ^= ring->bf.buf_size;
} else {
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!skb_vlan_tag_present(skb);
+ if (vlan_proto == ETH_P_8021AD)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+ else if (vlan_proto == ETH_P_8021Q)
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+ else
+ tx_desc->ctrl.ins_vlan = 0;
+
tx_desc->ctrl.fence_size = real_size;
/* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e81e53c370e..603d1c3d3b2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -196,7 +196,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
return;
}
- memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownership bit */
dma_wmb();
@@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
* and performing a NOP command
*/
for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+ /* Make sure request_irq was called */
+ if (!priv->eq_table.eq[i].have_irq)
+ continue;
+
/* Temporary use polling for command completions */
mlx4_cmd_use_polling(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57ad7a1..f13a4d7bbf95 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[26] = "Port ETS Scheduler support",
[27] = "Port beacon support",
[28] = "RX-ALL support",
+ [29] = "802.1ad offload support",
};
int i;
@@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT 0x40
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ if (dev->caps.phv_bit[port]) {
+ field = QUERY_FUNC_CAP_PHV_BIT;
+ MLX4_PUT(outbox->buf, field,
+ QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ }
+
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+ func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
@@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
@@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
if (field & (1 << 2))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+ if (field & 0x40)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ /* phv_check enable */
+ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+ if (byte_field & 0x2)
+ param->phv_check_en = 1;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2758,3 +2780,79 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
}
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID 0x10
+#define SET_PORT_GEN_PHV_EN 0x80
+
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+
+ context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+ if (phv_bit)
+ context->phv_en |= SET_PORT_GEN_PHV_EN;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+ int err;
+ struct mlx4_func_cap func_cap;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+ int ret;
+
+ if (mlx4_is_slave(dev))
+ return -EPERM;
+
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+ ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+ if (!ret)
+ dev->caps.phv_bit[port] = new_val;
+ return ret;
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
+
+void mlx4_replace_zero_macs(struct mlx4_dev *dev)
+{
+ int i;
+ u8 mac_addr[ETH_ALEN];
+
+ dev->port_random_macs = 0;
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ if (!dev->caps.def_mac[i] &&
+ dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+ eth_random_addr(mac_addr);
+ dev->port_random_macs |= 1 << i;
+ dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c2461ad..08de5555c2f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
u16 cqe_size; /* For use only when CQE stride feature enabled */
u16 eqe_size; /* For use only when EQE stride feature enabled */
u8 rss_ip_frags;
+ u8 phv_check_en; /* for QUERY_HCA */
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0d80aed59043..0472941af820 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -63,8 +63,11 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irq(&priv->ctx_lock);
+ if (intf->activate)
+ intf->activate(&priv->dev, dev_ctx->context);
} else
kfree(dev_ctx);
+
}
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 29c2a017a450..85f1b1e7e505 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+ struct mlx4_init_hca_param hca_param;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ /* Turn off the PHV_EN flag in case phv_check_en is set.
+ * phv_check_en is a HW check that parses the packet and verifies
+ * that the phv bit was reported correctly in the WQE. To allow
+ * QinQ, the PHV_EN flag should be set and phv_check_en must be
+ * cleared, otherwise QinQ packets will be dropped by the HW.
+ */
+ if (err || hca_param.phv_check_en)
+ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+ }
+
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -848,6 +863,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
+ mlx4_replace_zero_macs(dev);
+
dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
@@ -2657,6 +2674,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
+ if (nreq > MAX_MSIX)
+ nreq = MAX_MSIX;
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
@@ -2912,6 +2931,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
{
u64 dev_flags = dev->flags;
int err = 0;
+ int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+ MLX4_MAX_NUM_VF);
if (reset_flow) {
dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2937,6 +2958,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
}
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
+ if (total_vfs > fw_enabled_sriov_vfs) {
+ mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
+ total_vfs, fw_enabled_sriov_vfs);
+ err = -ENOMEM;
+ goto disable_sriov;
+ }
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
@@ -3418,20 +3445,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
goto err_disable_pdev;
}
}
- if (total_vfs >= MLX4_MAX_NUM_VF) {
+ if (total_vfs > MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) than allowed (%d)\n",
- total_vfs, MLX4_MAX_NUM_VF - 1);
+ "Requested more VF's (%d) than allowed by hw (%d)\n",
+ total_vfs, MLX4_MAX_NUM_VF);
err = -EINVAL;
goto err_disable_pdev;
}
for (i = 0; i < MLX4_MAX_PORTS; i++) {
- if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+ if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
dev_err(&pdev->dev,
- "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+ "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
nvfs[i] + nvfs[2], i + 1,
- MLX4_MAX_NUM_VF_P_PORT - 1);
+ MLX4_MAX_NUM_VF_P_PORT);
err = -EINVAL;
goto err_disable_pdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d01aae..1d4e2e054647 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, port, steer, index, qp->qpn);
+ err = new_steering_entry(dev, port, steer,
+ index, qp->qpn);
else
- existing_steering_entry(dev, port, steer,
- index, qp->qpn);
+ err = existing_steering_entry(dev, port, steer,
+ index, qp->qpn);
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a092c5c34d43..e1cf9036af22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -787,6 +787,9 @@ struct mlx4_set_port_general_context {
u8 pprx;
u8 pfcrx;
u16 reserved4;
+ u32 reserved5;
+ u8 phv_en;
+ u8 reserved6[3];
};
struct mlx4_set_port_rqp_calc_context {
@@ -1375,6 +1378,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
void mlx4_init_quotas(struct mlx4_dev *dev);
+/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
+void mlx4_replace_zero_macs(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d1669eb52..defcf8c395bf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
*/
#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV 2
#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 78f51e103880..93195191f45b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -318,7 +318,7 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
key, NULL);
} else {
mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR_OR_NULL(mailbox))
+ if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 20268634a9ab..3311f35d08e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -422,15 +422,15 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
u64 qp_mask = 0;
int err = 0;
+ if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+ return -EINVAL;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cmd = (struct mlx4_update_qp_context *)mailbox->buf;
- if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
- return -EINVAL;
-
if (attr & MLX4_UPDATE_QP_SMAC) {
pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 731423ca575d..ac4b99ab1f85 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1238,8 +1238,10 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
return 0;
undo:
- for (--i; i >= base; --i)
+ for (--i; i >= 0; --i) {
rb_erase(&res_arr[i]->node, root);
+ list_del_init(&res_arr[i]->list);
+ }
spin_unlock_irq(mlx4_tlock(dev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 0715b497511f..6cb38304669f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
* register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+ size_t size, dma_addr_t *dma_handle,
+ int node)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ int original_node;
+ void *cpu_handle;
+
+ mutex_lock(&priv->alloc_mutex);
+ original_node = dev_to_node(&dev->pdev->dev);
+ set_dev_node(&dev->pdev->dev, node);
+ cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+ dma_handle, GFP_KERNEL);
+ set_dev_node(&dev->pdev->dev, original_node);
+ mutex_unlock(&priv->alloc_mutex);
+ return cpu_handle;
+}
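The helper temporarily retargets the device's NUMA node so dma_zalloc_coherent() allocates near the CPU that will touch the memory, with the mutex serializing the save/restore of dev_to_node(). A hypothetical caller pinning a buffer to the local node might look like:

#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/mlx5/driver.h>

/* Sketch: allocate a 4 KB buffer on the NUMA node of the current CPU.
 * mlx5_buf_alloc_node() is the interface added by this patch. */
static int demo_alloc_local(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
	int node = cpu_to_node(raw_smp_processor_id());

	return mlx5_buf_alloc_node(dev, 4096, buf, node);
}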
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf, int node)
{
dma_addr_t t;
buf->size = size;
buf->npages = 1;
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
+ buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
+ &t, node);
if (!buf->direct.buf)
return -ENOMEM;
@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
return 0;
}
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+ return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+ int node)
{
struct mlx5_db_pgdir *pgdir;
@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
return NULL;
bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
- pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, GFP_KERNEL);
+
+ pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+ &pgdir->db_dma, node);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
return 0;
}
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
struct mlx5_db_pgdir *pgdir;
int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
if (!mlx5_alloc_db_from_pgdir(pgdir, db))
goto out;
- pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+ pgdir = mlx5_alloc_db_pgdir(dev, node);
if (!pgdir) {
ret = -ENOMEM;
goto out;
@@ -145,6 +171,12 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
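
Note on the alloc.c hunk above: the new mlx5_dma_zalloc_coherent_node() serializes a temporary override of the PCI device's NUMA node around dma_zalloc_coherent(), so queue buffers and doorbell pages land on the requesting CPU's node. The pattern in isolation (helper and lock names hypothetical):

/* allocate DMA-coherent memory on a chosen NUMA node by temporarily
 * re-homing the struct device; the mutex keeps concurrent callers
 * from observing each other's node override */
static void *dma_zalloc_on_node(struct device *dev, size_t size,
				dma_addr_t *dma, int node,
				struct mutex *alloc_lock)
{
	void *buf;
	int orig;

	mutex_lock(alloc_lock);
	orig = dev_to_node(dev);
	set_dev_node(dev, node);
	buf = dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, orig);
	mutex_unlock(alloc_lock);

	return buf;
}
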
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 75ff58dc1ff5..037fc4cdf5af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
-#include <asm-generic/kmap_types.h>
+#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
@@ -254,6 +254,156 @@ static void dump_buf(void *buf, int size, int data_only, int offset)
pr_debug("\n");
}
+enum {
+ MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+ MLX5_DRIVER_SYND = 0xbadd00de,
+};
+
+static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+ u32 *synd, u8 *status)
+{
+ *synd = 0;
+ *status = 0;
+
+ switch (op) {
+ case MLX5_CMD_OP_TEARDOWN_HCA:
+ case MLX5_CMD_OP_DISABLE_HCA:
+ case MLX5_CMD_OP_MANAGE_PAGES:
+ case MLX5_CMD_OP_DESTROY_MKEY:
+ case MLX5_CMD_OP_DESTROY_EQ:
+ case MLX5_CMD_OP_DESTROY_CQ:
+ case MLX5_CMD_OP_DESTROY_QP:
+ case MLX5_CMD_OP_DESTROY_PSV:
+ case MLX5_CMD_OP_DESTROY_SRQ:
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ case MLX5_CMD_OP_DESTROY_DCT:
+ case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_DEALLOC_PD:
+ case MLX5_CMD_OP_DEALLOC_UAR:
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ case MLX5_CMD_OP_DEALLOC_XRCD:
+ case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_DESTROY_TIR:
+ case MLX5_CMD_OP_DESTROY_SQ:
+ case MLX5_CMD_OP_DESTROY_RQ:
+ case MLX5_CMD_OP_DESTROY_RMP:
+ case MLX5_CMD_OP_DESTROY_TIS:
+ case MLX5_CMD_OP_DESTROY_RQT:
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+ case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ return MLX5_CMD_STAT_OK;
+
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_INIT_HCA:
+ case MLX5_CMD_OP_ENABLE_HCA:
+ case MLX5_CMD_OP_QUERY_PAGES:
+ case MLX5_CMD_OP_SET_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_SET_ISSI:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+ case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+ case MLX5_CMD_OP_CREATE_EQ:
+ case MLX5_CMD_OP_QUERY_EQ:
+ case MLX5_CMD_OP_GEN_EQE:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_CREATE_PSV:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_DRAIN_DCT:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_UAR:
+ case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+ case MLX5_CMD_OP_ACCESS_REG:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_MAD_IFC:
+ case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+ case MLX5_CMD_OP_SET_MAD_DEMUX:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ *status = MLX5_DRIVER_STATUS_ABORTED;
+ *synd = MLX5_DRIVER_SYND;
+ return -EIO;
+ default:
+ mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
+ return -EINVAL;
+ }
+}
+
const char *mlx5_command_str(int command)
{
switch (command) {
@@ -473,6 +623,7 @@ static void cmd_work_handler(struct work_struct *work)
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
+ unsigned long flags;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -485,6 +636,9 @@ static void cmd_work_handler(struct work_struct *work)
}
} else {
ent->idx = cmd->max_reg_cmds;
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ clear_bit(ent->idx, &cmd->bitmask);
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
ent->token = alloc_token(cmd);
@@ -584,6 +738,16 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
return err;
}
+static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
+{
+ return &out->syndrome;
+}
+
+static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+{
+ return &out->status;
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
@@ -1081,7 +1245,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
}
}
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -1092,7 +1256,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
s64 ds;
struct mlx5_cmd_stats *stats;
unsigned long flags;
+ unsigned long vector;
+ /* there can be at most 32 command queues */
+ vector = vec & 0xffffffff;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
struct semaphore *sem;
@@ -1110,11 +1277,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
ent->ret = verify_signature(ent);
else
ent->ret = 0;
- ent->status = ent->lay->status_own >> 1;
+ if (vec & MLX5_TRIGGERED_CMD_COMP)
+ ent->status = MLX5_DRIVER_STATUS_ABORTED;
+ else
+ ent->status = ent->lay->status_own >> 1;
+
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
}
free_ent(cmd, ent->idx);
+
if (ent->callback) {
ds = ent->ts2 - ent->ts1;
if (ent->op < ARRAY_SIZE(cmd->stats)) {
@@ -1136,6 +1308,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
+ err = err ? err : ent->status;
free_cmd(ent);
callback(err, context);
} else {
@@ -1183,6 +1356,11 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
return msg;
}
+static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
+{
+ return be16_to_cpu(in->opcode);
+}
+
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
@@ -1197,6 +1375,15 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
gfp_t gfp;
int err;
u8 status = 0;
+ u32 drv_synd;
+
+ if (pci_channel_offline(dev->pdev) ||
+ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
+ *get_synd_ptr(out) = cpu_to_be32(drv_synd);
+ *get_status_ptr(out) = status;
+ return err;
+ }
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
@@ -1363,6 +1550,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
int err;
int i;
+ memset(cmd, 0, sizeof(*cmd));
cmd_if_rev = cmdif_rev(dev);
if (cmd_if_rev != CMD_IF_REV) {
dev_err(&dev->pdev->dev,
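
Note on the cmd.c hunk above: once the PCI channel is offline or the device is in internal error, commands never reach hardware. Destroy/teardown opcodes report MLX5_CMD_STAT_OK so unload can make progress; everything else fails with -EIO plus the driver-generated syndrome 0xbadd00de. A hedged sketch of how a caller could tell the synthetic failure apart (helper name hypothetical; field layout per the struct mlx5_outbox_hdr accessors used above):

static bool cmd_failed_in_driver(int err, struct mlx5_outbox_hdr *out)
{
	/* -EIO plus the driver syndrome means the command was aborted
	 * locally, not rejected by firmware */
	return err == -EIO &&
	       be32_to_cpu(out->syndrome) == 0xbadd00de; /* MLX5_DRIVER_SYND */
}
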
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 04ab7e445eae..b51e42d6fbec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -242,6 +242,7 @@ int mlx5_init_cq_table(struct mlx5_core_dev *dev)
struct mlx5_cq_table *table = &dev->priv.cq_table;
int err;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
err = mlx5_cq_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d23bd657e3c..f2ae62dd8c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -42,24 +42,27 @@
#define MLX5E_MAX_NUM_TC 8
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
+#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET 16
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
@@ -91,6 +94,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
"lro_bytes",
"rx_csum_good",
"rx_csum_none",
+ "rx_csum_sw",
"tx_csum_offload",
"tx_queue_stopped",
"tx_queue_wake",
@@ -128,18 +132,94 @@ struct mlx5e_vport_stats {
u64 lro_bytes;
u64 rx_csum_good;
u64 rx_csum_none;
+ u64 rx_csum_sw;
u64 tx_csum_offload;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_wqe_err;
-#define NUM_VPORT_COUNTERS 31
+#define NUM_VPORT_COUNTERS 32
+};
+
+static const char pport_strings[][ETH_GSTRING_LEN] = {
+ /* IEEE802.3 counters */
+ "frames_tx",
+ "frames_rx",
+ "check_seq_err",
+ "alignment_err",
+ "octets_tx",
+ "octets_received",
+ "multicast_xmitted",
+ "broadcast_xmitted",
+ "multicast_rx",
+ "broadcast_rx",
+ "in_range_len_errors",
+ "out_of_range_len",
+ "too_long_errors",
+ "symbol_err",
+ "mac_control_tx",
+ "mac_control_rx",
+ "unsupported_op_rx",
+ "pause_ctrl_rx",
+ "pause_ctrl_tx",
+
+ /* RFC2863 counters */
+ "in_octets",
+ "in_ucast_pkts",
+ "in_discards",
+ "in_errors",
+ "in_unknown_protos",
+ "out_octets",
+ "out_ucast_pkts",
+ "out_discards",
+ "out_errors",
+ "in_multicast_pkts",
+ "in_broadcast_pkts",
+ "out_multicast_pkts",
+ "out_broadcast_pkts",
+
+ /* RFC2819 counters */
+ "drop_events",
+ "octets",
+ "pkts",
+ "broadcast_pkts",
+ "multicast_pkts",
+ "crc_align_errors",
+ "undersize_pkts",
+ "oversize_pkts",
+ "fragments",
+ "jabbers",
+ "collisions",
+ "p64octets",
+ "p65to127octets",
+ "p128to255octets",
+ "p256to511octets",
+ "p512to1023octets",
+ "p1024to1518octets",
+ "p1519to2047octets",
+ "p2048to4095octets",
+ "p4096to8191octets",
+ "p8192to10239octets",
+};
+
+#define NUM_IEEE_802_3_COUNTERS 19
+#define NUM_RFC_2863_COUNTERS 13
+#define NUM_RFC_2819_COUNTERS 21
+#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
+ NUM_RFC_2863_COUNTERS + \
+ NUM_RFC_2819_COUNTERS)
+
+struct mlx5e_pport_stats {
+ __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
+ __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
+ __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
};
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
"csum_none",
+ "csum_sw",
"lro_packets",
"lro_bytes",
"wqe_err"
@@ -148,10 +228,11 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
struct mlx5e_rq_stats {
u64 packets;
u64 csum_none;
+ u64 csum_sw;
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -179,6 +260,7 @@ struct mlx5e_sq_stats {
struct mlx5e_stats {
struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
};
struct mlx5e_params {
@@ -192,9 +274,12 @@ struct mlx5e_params {
u16 tx_cq_moderation_usec;
u16 tx_cq_moderation_pkts;
u16 min_rx_wqes;
- u16 rx_hash_log_tbl_sz;
bool lro_en;
u32 lro_wqe_sz;
+ u16 tx_max_inline;
+ u8 rss_hfunc;
+ u8 toeplitz_hash_key[40];
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
};
enum {
@@ -214,6 +299,7 @@ struct mlx5e_cq {
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -237,6 +323,7 @@ struct mlx5e_rq {
struct mlx5_wq_ctrl wq_ctrl;
u32 rqn;
struct mlx5e_channel *channel;
+ struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
struct mlx5e_tx_skb_cb {
@@ -266,7 +353,9 @@ struct mlx5e_sq {
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 dma_fifo_pc;
- u32 bf_offset;
+ u16 bf_offset;
+ u16 prev_cc;
+ u8 bf_budget;
struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -279,9 +368,10 @@ struct mlx5e_sq {
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
void __iomem *uar_map;
+ void __iomem *uar_bf_map;
struct netdev_queue *txq;
u32 sqn;
- u32 bf_buf_size;
+ u16 bf_buf_size;
u16 max_inline;
u16 edge;
struct device *pdev;
@@ -315,7 +405,6 @@ struct mlx5e_channel {
__be32 mkey_be;
u8 num_tc;
unsigned long flags;
- int tc_to_txq_map[MLX5E_MAX_NUM_TC];
/* control */
struct mlx5e_priv *priv;
@@ -324,20 +413,24 @@ struct mlx5e_channel {
};
enum mlx5e_traffic_types {
- MLX5E_TT_IPV4_TCP = 0,
- MLX5E_TT_IPV6_TCP = 1,
- MLX5E_TT_IPV4_UDP = 2,
- MLX5E_TT_IPV6_UDP = 3,
- MLX5E_TT_IPV4 = 4,
- MLX5E_TT_IPV6 = 5,
- MLX5E_TT_ANY = 6,
- MLX5E_NUM_TT = 7,
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
};
-enum {
- MLX5E_RQT_SPREADING = 0,
- MLX5E_RQT_DEFAULT_RQ = 1,
- MLX5E_NUM_RQT = 2,
+enum mlx5e_rqt_ix {
+ MLX5E_INDIRECTION_RQT,
+ MLX5E_SINGLE_RQ_RQT,
+ MLX5E_NUM_RQT,
};
struct mlx5e_eth_addr_info {
@@ -362,10 +455,10 @@ struct mlx5e_eth_addr_db {
enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLE,
MLX5E_STATE_OPENED,
+ MLX5E_STATE_DESTROYING,
};
struct mlx5e_vlan_db {
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 active_vlans_ft_ix[VLAN_N_VID];
u32 untagged_rule_ft_ix;
u32 any_vlan_rule_ft_ix;
@@ -379,9 +472,9 @@ struct mlx5e_flow_table {
struct mlx5e_priv {
/* priv data path fields - start */
- int num_tc;
int default_vlan_prio;
struct mlx5e_sq **txq_to_sq_map;
+ int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
/* priv data path fields - end */
unsigned long state;
@@ -390,10 +483,11 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mr mr;
+ struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 rqtn;
+ u32 rqtn[MLX5E_NUM_RQT];
u32 tirn[MLX5E_NUM_TT];
struct mlx5e_flow_table ft;
@@ -470,10 +564,9 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_open_flow_table(struct mlx5e_priv *priv);
-void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -482,17 +575,17 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe)
+ struct mlx5e_tx_wqe *wqe, int bf_sz)
{
+ u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
/* ensure wqe is visible to device before updating doorbell record */
dma_wmb();
@@ -503,9 +596,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
- mlx5_write64((__be32 *)&wqe->ctrl,
- sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
- NULL);
+ if (bf_sz) {
+ __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+ /* flush the write-combining mapped buffer */
+ wmb();
+
+ } else {
+ mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ }
sq->bf_offset ^= sq->bf_buf_size;
}
@@ -518,4 +617,11 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+ return min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
+}
+
extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
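
Note on the en.h hunk above: mlx5e_tx_notify_hw() now takes bf_sz, the WQE size in 64-bit words when the descriptor fits the write-combining BlueFlame buffer, or 0 for the plain doorbell; sq->bf_offset ^= sq->bf_buf_size then alternates between the two BlueFlame halves. A hedged sketch of a caller (the real policy lives in en_tx.c, outside this diff; the wrapper and wqe_bytes are hypothetical):

static void ring_doorbell(struct mlx5e_sq *sq, struct mlx5e_tx_wqe *wqe,
			  unsigned int wqe_bytes)
{
	int bf_sz = 0;

	/* prefer a BlueFlame write while the per-SQ budget lasts and
	 * the descriptor fits the WC buffer */
	if (sq->bf_budget && wqe_bytes <= sq->bf_buf_size) {
		bf_sz = wqe_bytes >> 3;	/* __iowrite64_copy counts u64 words */
		sq->bf_budget--;
	}

	mlx5e_tx_notify_hw(sq, wqe, bf_sz);
}
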
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 388938482ff9..2e022e900939 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,9 +171,9 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_COUNTERS +
+ return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
priv->params.num_channels * NUM_RQ_STATS +
- priv->params.num_channels * priv->num_tc *
+ priv->params.num_channels * priv->params.num_tc *
NUM_SQ_STATS;
/* fallthrough */
default:
@@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev,
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vport_strings[i]);
+ /* PPORT counters */
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_strings[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -207,7 +212,7 @@ static void mlx5e_get_strings(struct net_device *dev,
"rx%d_%s", i, rq_stats_strings[j]);
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data +
(idx++) * ETH_GSTRING_LEN,
@@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
data[idx++] = ((u64 *)&priv->stats.vport)[i];
+ for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+ data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
@@ -242,7 +250,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
((u64 *)&priv->channel[i]->rq.stats)[j];
for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] = !test_bit(MLX5E_STATE_OPENED,
&priv->state) ? 0 :
@@ -264,7 +272,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_params new_params;
+ bool was_opened;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
@@ -316,11 +324,18 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.log_rq_size = log_rq_size;
- new_params.log_sq_size = log_sq_size;
- new_params.min_rx_wqes = min_rx_wqes;
- err = mlx5e_update_priv_params(priv, &new_params);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.log_rq_size = log_rq_size;
+ priv->params.log_sq_size = log_sq_size;
+ priv->params.min_rx_wqes = min_rx_wqes;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -330,9 +345,8 @@ static void mlx5e_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
- ch->max_combined = ncv;
+ ch->max_combined = mlx5e_get_max_num_channels(priv->mdev);
ch->combined_count = priv->params.num_channels;
}
@@ -340,9 +354,9 @@ static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
- struct mlx5e_params new_params;
+ bool was_opened;
int err = 0;
if (!count) {
@@ -365,9 +379,16 @@ static int mlx5e_set_channels(struct net_device *dev,
return 0;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
- new_params.num_channels = count;
- err = mlx5e_update_priv_params(priv, &new_params);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.num_channels = count;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -606,7 +627,7 @@ static int mlx5e_set_settings(struct net_device *netdev,
u32 link_modes;
u32 speed;
u32 eth_proto_cap, eth_proto_admin;
- u8 port_status;
+ enum mlx5_port_status ps;
int err;
speed = ethtool_cmd_speed(cmd);
@@ -640,25 +661,197 @@ static int mlx5e_set_settings(struct net_device *netdev,
if (link_modes == eth_proto_admin)
goto out;
- err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ mlx5_query_port_admin_status(mdev, &ps);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+
+out:
+ return err;
+}
+
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return MLX5E_INDIR_RQT_SIZE;
+}
+
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (indir)
+ memcpy(indir, priv->params.indirection_rqt,
+ sizeof(priv->params.indirection_rqt));
+
+ if (key)
+ memcpy(key, priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ if (hfunc)
+ *hfunc = priv->params.rss_hfunc;
+
+ return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool close_open;
+ int err = 0;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+ (hfunc != ETH_RSS_HASH_XOR) &&
+ (hfunc != ETH_RSS_HASH_TOP))
+ return -EINVAL;
+
+ mutex_lock(&priv->state_lock);
+
+ if (indir) {
+ memcpy(priv->params.indirection_rqt, indir,
+ sizeof(priv->params.indirection_rqt));
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ }
+
+ close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+ test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (close_open)
+ mlx5e_close_locked(dev);
+
+ if (key)
+ memcpy(priv->params.toeplitz_hash_key, key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->params.rss_hfunc = hfunc;
+
+ if (close_open)
+ err = mlx5e_open_locked(priv->netdev);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = priv->params.num_channels;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ const struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ *(u32 *)data = priv->params.tx_max_inline;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
+ u32 val;
+ int err = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val > mlx5e_get_max_inline_cap(mdev)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(dev);
+
+ priv->params.tx_max_inline = val;
+
+ if (was_opened)
+ err = mlx5e_open_locked(dev);
+
+ mutex_unlock(&priv->state_lock);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+ &pauseparam->tx_pause);
if (err) {
- netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
__func__, err);
- goto out;
}
+}
- err = mlx5_query_port_status(mdev, &port_status);
- if (err)
- goto out;
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
- if (port_status == MLX5_PORT_DOWN)
- return 0;
+ if (pauseparam->autoneg)
+ return -EINVAL;
+
+ err = mlx5_set_port_pause(mdev,
+ pauseparam->rx_pause ? 1 : 0,
+ pauseparam->tx_pause ? 1 : 0);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+ __func__, err);
+ }
- err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
- if (err)
- goto out;
- err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
-out:
return err;
}
@@ -676,4 +869,13 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_coalesce = mlx5e_set_coalesce,
.get_settings = mlx5e_get_settings,
.set_settings = mlx5e_set_settings,
+ .get_rxfh_key_size = mlx5e_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .get_tunable = mlx5e_get_tunable,
+ .set_tunable = mlx5e_set_tunable,
+ .get_pauseparam = mlx5e_get_pauseparam,
+ .set_pauseparam = mlx5e_set_pauseparam,
};
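
Note on the en_ethtool.c hunk above: set_ringparam, set_channels and set_tunable all drop mlx5e_update_priv_params() in favor of the same explicit sequence under state_lock. The shared idiom, lifted out for clarity (wrapper name hypothetical; parameter writes elided):

/* mutate priv->params only while the channels are torn down */
static int update_params_locked(struct net_device *dev,
				struct mlx5e_priv *priv)
{
	bool was_opened;
	int err = 0;

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(dev);

	/* ... write the new priv->params fields here ... */

	if (was_opened)
		err = mlx5e_open_locked(dev);

	mutex_unlock(&priv->state_lock);
	return err;
}
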
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 120db80c47aa..22d603f78273 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
{
void *ft = priv->ft.main;
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+ mlx5_del_flow_table_entry(ft,
+ ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
- if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
- if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ if (ai->tt_vec & BIT(MLX5E_TT_ANY))
mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
}
@@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
switch (eth_addr_type) {
case MLX5E_UC:
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
case MLX5E_MC_IPV4:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV4) |
0;
break;
case MLX5E_MC_IPV6:
ret =
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV6) |
0;
break;
case MLX5E_MC_OTHER:
ret =
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
case MLX5E_ALLMULTI:
ret =
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
default: /* MLX5E_PROMISC */
ret =
- (1 << MLX5E_TT_IPV4_TCP) |
- (1 << MLX5E_TT_IPV6_TCP) |
- (1 << MLX5E_TT_IPV4_UDP) |
- (1 << MLX5E_TT_IPV6_UDP) |
- (1 << MLX5E_TT_IPV4) |
- (1 << MLX5E_TT_IPV6) |
- (1 << MLX5E_TT_ANY) |
+ BIT(MLX5E_TT_IPV4_TCP) |
+ BIT(MLX5E_TT_IPV6_TCP) |
+ BIT(MLX5E_TT_IPV4_UDP) |
+ BIT(MLX5E_TT_IPV6_UDP) |
+ BIT(MLX5E_TT_IPV4_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV6_IPSEC_AH) |
+ BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+ BIT(MLX5E_TT_IPV4) |
+ BIT(MLX5E_TT_IPV6) |
+ BIT(MLX5E_TT_ANY) |
0;
break;
}
@@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
u8 *match_criteria_dmac;
void *ft = priv->ft.main;
u32 *tirn = priv->tirn;
+ u32 *ft_ix;
u32 tt_vec;
int err;
@@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
- if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+ if (tt_vec & BIT(MLX5E_TT_ANY)) {
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_ANY]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_ANY]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_ANY);
}
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.ethertype);
- if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+ if (tt_vec & BIT(MLX5E_TT_IPV4)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+ if (tt_vec & BIT(MLX5E_TT_IPV6)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6);
}
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_UDP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_UDP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
}
MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
IPPROTO_TCP);
- if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IP);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV4_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
}
- if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
ETH_P_IPV6);
MLX5_SET(dest_format_struct, dest, destination_id,
tirn[MLX5E_TT_IPV6_TCP]);
err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
match_criteria, flow_context,
- &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
- if (err) {
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
- return err;
- }
- ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_AH);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_ESP);
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+ }
+
+ ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+ if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ ft_ix);
+ if (err)
+ goto err_del_ai;
+
+ ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
}
return 0;
+
+err_del_ai:
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+ return err;
}
static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -498,44 +594,32 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (!priv->vlan.filter_disabled)
+ return;
- if (priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = false;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = false;
+ if (priv->netdev->flags & IFF_PROMISC)
+ return;
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ if (priv->vlan.filter_disabled)
+ return;
- if (!priv->vlan.filter_disabled) {
- priv->vlan.filter_disabled = true;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- }
+ priv->vlan.filter_disabled = true;
+ if (priv->netdev->flags & IFF_PROMISC)
+ return;
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int err = 0;
- mutex_lock(&priv->state_lock);
-
- set_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
-
- mutex_unlock(&priv->state_lock);
-
- return err;
+ return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -543,56 +627,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mutex_lock(&priv->state_lock);
-
- clear_bit(vid, priv->vlan.active_vlans);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
- mutex_unlock(&priv->state_lock);
-
- return 0;
-}
-
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
- int err;
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
- vid);
- if (err)
- return err;
- }
-
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- return err;
-
- if (priv->vlan.filter_disabled) {
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
- if (err)
- return err;
- }
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
return 0;
}
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
-{
- u16 vid;
-
- if (priv->vlan.filter_disabled)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
- for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -656,18 +695,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5e_sync_netdev_addr(priv);
mlx5e_apply_netdev_addr(priv);
}
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+void mlx5e_set_rx_mode_work(struct work_struct *work)
{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
struct net_device *ndev = priv->netdev;
- bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
@@ -679,8 +721,12 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
- if (enable_promisc)
+ if (enable_promisc) {
mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (!priv->vlan.filter_disabled)
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
if (enable_allmulti)
mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
@@ -692,25 +738,18 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
if (disable_allmulti)
mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
- if (disable_promisc)
+ if (disable_promisc) {
+ if (!priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+ }
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
ea->broadcast_enabled = broadcast_enabled;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
-
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_set_rx_mode_core(priv);
- mutex_unlock(&priv->state_lock);
-}
-
void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
{
ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -725,7 +764,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
if (!g)
return -ENOMEM;
- g[0].log_sz = 2;
+ g[0].log_sz = 3;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
outer_headers.ethertype);
@@ -833,7 +872,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
mlx5_destroy_flow_table(priv->ft.vlan);
}
-int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
{
int err;
@@ -845,16 +884,24 @@ int mlx5e_open_flow_table(struct mlx5e_priv *priv)
if (err)
goto err_destroy_main_flow_table;
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ goto err_destroy_vlan_flow_table;
+
return 0;
+err_destroy_vlan_flow_table:
+ mlx5e_destroy_vlan_flow_table(priv);
+
err_destroy_main_flow_table:
mlx5e_destroy_main_flow_table(priv);
return err;
}
-void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
{
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_vlan_flow_table(priv);
mlx5e_destroy_main_flow_table(priv);
}
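
Note on the en_flow_table.c hunk above: the per-rule cleanup blocks collapse into one err_del_ai label; since mlx5e_del_eth_addr_from_flow_table() keys off ai->tt_vec, the label undoes exactly the rules that were recorded as added. The idiom in miniature (add_one_rule and the wrapper are hypothetical):

static int add_rules(struct mlx5e_priv *priv,
		     struct mlx5e_eth_addr_info *ai, u32 tt_vec)
{
	int err, tt;

	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (!(tt_vec & BIT(tt)))
			continue;
		err = add_one_rule(priv, ai, tt);	/* hypothetical */
		if (err)
			goto err_del_ai;
		ai->tt_vec |= BIT(tt);	/* record the success */
	}
	return 0;

err_del_ai:
	mlx5e_del_eth_addr_from_flow_table(priv, ai);	/* walks ai->tt_vec */
	return err;
}
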
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 40206da1f9d7..5fc4d2d78cdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@ struct mlx5e_rq_param {
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
+ u16 max_inline;
};
struct mlx5e_cq_param {
@@ -81,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *s = &priv->stats.pport;
+ u32 *in;
+ u32 *out;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ in = mlx5_vzalloc(sz);
+ out = mlx5_vzalloc(sz);
+ if (!in || !out)
+ goto free_out;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->IEEE_802_3_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->IEEE_802_3_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2863_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2863_counters));
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+ memcpy(s->RFC_2819_counters,
+ MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+ sizeof(s->RFC_2819_counters));
+
+free_out:
+ kvfree(in);
+ kvfree(out);
+}
+
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -107,6 +149,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->lro_packets = 0;
s->lro_bytes = 0;
s->rx_csum_none = 0;
+ s->rx_csum_sw = 0;
s->rx_wqe_err = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
@@ -114,9 +157,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
+ s->rx_csum_sw += rq_stats->csum_sw;
s->rx_wqe_err += rq_stats->wqe_err;
- for (j = 0; j < priv->num_tc; j++) {
+ for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tso_packets += sq_stats->tso_packets;
@@ -199,8 +243,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none -
+ s->rx_csum_sw;
+ mlx5e_update_pport_counters(priv);
free_out:
kvfree(out);
}
@@ -272,6 +318,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
int err;
int i;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
@@ -304,6 +352,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->netdev = c->netdev;
rq->channel = c;
rq->ix = c->ix;
+ rq->priv = c->priv;
return 0;
@@ -321,8 +370,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = rq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
@@ -342,11 +390,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
memcpy(rqc, param->rqc, sizeof(param->rqc));
- MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -389,21 +437,17 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5e_priv *priv = c->priv;
- struct mlx5_core_dev *mdev = priv->mdev;
-
- mlx5_core_destroy_rq(mdev, rq->rqn);
+ mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_wq_ll *wq = &rq->wq;
- int i;
- for (i = 0; i < 1000; i++) {
+ while (time_before(jiffies, exp_time)) {
if (wq->cur_sz >= priv->params.min_rx_wqes)
return 0;
@@ -502,6 +546,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
return err;
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
+
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
&sq->wq_ctrl);
if (err)
@@ -509,7 +555,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
+ sq->uar_bf_map = sq->uar.bf_map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+ sq->max_inline = param->max_inline;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
@@ -518,11 +566,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
txq_ix = c->ix + tc * priv->params.num_channels;
sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
- sq->pdev = c->pdev;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->bf_budget = MLX5E_SQ_BF_BUDGET;
priv->txq_to_sq_map[txq_ix] = sq;
return 0;
@@ -569,7 +618,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, user_index, sq->tc);
MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
@@ -579,7 +627,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +750,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
int err;
u32 i;
- param->wq.numa = cpu_to_node(c->cpu);
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -732,6 +781,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
}
cq->channel = c;
+ cq->priv = priv;
return 0;
}
@@ -743,8 +793,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
@@ -773,7 +822,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
- PAGE_SHIFT);
+ MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -790,8 +839,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
- struct mlx5e_channel *c = cq->channel;
- struct mlx5e_priv *priv = c->priv;
+ struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -901,13 +949,13 @@ static void mlx5e_close_sqs(struct mlx5e_channel *c)
mlx5e_close_sq(&c->sq[tc]);
}
-static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
- int num_channels)
+static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
int i;
for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
- c->tc_to_txq_map[i] = c->ix + i * num_channels;
+ priv->channeltc_to_txq_map[ix][i] =
+ ix + i * priv->params.num_channels;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
@@ -929,9 +977,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mr.key);
- c->num_tc = priv->num_tc;
+ c->num_tc = priv->params.num_tc;
- mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+ mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -1000,7 +1048,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
}
@@ -1014,7 +1062,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1108,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
struct mlx5e_channel_param cparam;
+ int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
int j;
- priv->channel = kcalloc(priv->params.num_channels,
- sizeof(struct mlx5e_channel *), GFP_KERNEL);
+ priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+ GFP_KERNEL);
- priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+ priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
if (!priv->channel || !priv->txq_to_sq_map)
goto err_free_txq_to_sq_map;
mlx5e_build_channel_param(priv, &cparam);
- for (i = 0; i < priv->params.num_channels; i++) {
+ for (i = 0; i < nch; i++) {
err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
- for (j = 0; j < priv->params.num_channels; j++) {
+ for (j = 0; j < nch; j++) {
err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
if (err)
goto err_close_channels;
@@ -1109,67 +1159,73 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
kfree(priv->channel);
}
-static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_rx_hash_fn(int hfunc)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(create_tis_in)];
- void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ return (hfunc == ETH_RSS_HASH_TOP) ?
+ MLX5_RX_HASH_FN_TOEPLITZ :
+ MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
- memset(in, 0, sizeof(in));
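+/* Reverse the lowest 'size' bits of 'a' (bit i takes bit size-1-i);
+ * used below, seemingly to decorrelate adjacent indirection-table
+ * entries when the XOR8 RX hash function is in use.
+ */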
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+ int inv = 0;
+ int i;
- MLX5_SET(tisc, tisc, prio, tc);
- MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+ for (i = 0; i < size; i++)
+ inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
- return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+ return inv;
}
-static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
- mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
-}
+ int i;
-static int mlx5e_open_tises(struct mlx5e_priv *priv)
-{
- int num_tc = priv->num_tc;
- int err;
- int tc;
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ int ix = i;
- for (tc = 0; tc < num_tc; tc++) {
- err = mlx5e_open_tis(priv, tc);
- if (err)
- goto err_close_tises;
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+ ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+ ix = priv->params.indirection_rqt[ix];
+ ix = ix % priv->params.num_channels;
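+ /* Use the live channel RQ while the netdev is open; otherwise park
+ * the entry on the drop RQ so the RQT always has a valid target.
+ */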
+ MLX5_SET(rqtc, rqtc, rq_num[i],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn);
}
+}
- return 0;
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
+ enum mlx5e_rqt_ix rqt_ix)
+{
-err_close_tises:
- for (tc--; tc >= 0; tc--)
- mlx5e_close_tis(priv, tc);
+ switch (rqt_ix) {
+ case MLX5E_INDIRECTION_RQT:
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
- return err;
-}
+ break;
-static void mlx5e_close_tises(struct mlx5e_priv *priv)
-{
- int num_tc = priv->num_tc;
- int tc;
+ default: /* MLX5E_SINGLE_RQ_RQT */
+ MLX5_SET(rqtc, rqtc, rq_num[0],
+ test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[0]->rq.rqn :
+ priv->drop_rq.rqn);
- for (tc = 0; tc < num_tc; tc++)
- mlx5e_close_tis(priv, tc);
+ break;
+ }
}
-static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
- u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
void *rqtc;
int inlen;
- int err;
int sz;
- int i;
+ int err;
- sz = 1 << priv->params.rx_hash_log_tbl_sz;
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1181,198 +1237,101 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- for (i = 0; i < sz; i++) {
- int ix = i % priv->params.num_channels;
-
- MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
- }
-
- MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
- if (!err)
- priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+ err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
kvfree(in);
return err;
}
-static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *rqtc;
+ int inlen;
+ int sz;
+ int err;
- memset(in, 0, sizeof(in));
+ sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
- MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
- MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
- mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
- sizeof(out));
-}
+ rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
-{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
+ MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
- if (priv->params.lro_en) {
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (priv->params.lro_wqe_sz -
- ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[3]));
- }
+ err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
- switch (tt) {
- case MLX5E_TT_ANY:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_DIRECT);
- MLX5_SET(tirc, tirc, inline_rqn,
- priv->channel[0]->rq.rqn);
- break;
- default:
- MLX5_SET(tirc, tirc, disp_type,
- MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn);
- MLX5_SET(tirc, tirc, rx_hash_fn,
- MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
- rx_hash_toeplitz_key),
- MLX5_FLD_SZ_BYTES(tirc,
- rx_hash_toeplitz_key));
- break;
- }
+ kvfree(in);
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+ return err;
+}
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+{
+ mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
+}
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+{
+ mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_ALL);
- break;
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+ if (!priv->params.lro_en)
+ return;
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- }
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[2]));
}
-static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
+
+ void *in;
void *tirc;
int inlen;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+ tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- mlx5e_build_tir_ctx(priv, tirc, tt);
+ mlx5e_build_tir_ctx_lro(tirc, priv);
- err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+ err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
-{
- mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
-}
-
-static int mlx5e_open_tirs(struct mlx5e_priv *priv)
-{
- int err;
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_open_tir(priv, i);
- if (err)
- goto err_close_tirs;
- }
-
- return 0;
-
-err_close_tirs:
- for (i--; i >= 0; i--)
- mlx5e_close_tir(priv, i);
-
- return err;
-}
-
-static void mlx5e_close_tirs(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++)
- mlx5e_close_tir(priv, i);
-}
-
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1400,81 +1359,32 @@ int mlx5e_open_locked(struct net_device *netdev)
int num_txqs;
int err;
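+ /* Set OPENED up front: mlx5e_fill_rqt_rqns() keys off this bit to
+ * choose between the channel RQs and the drop RQ.
+ */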
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
err = mlx5e_set_dev_port_mtu(netdev);
if (err)
- return err;
-
- err = mlx5e_open_tises(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
- __func__, err);
- return err;
- }
+ goto err_clear_state_opened_flag;
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
__func__, err);
- goto err_close_tises;
- }
-
- err = mlx5e_open_rqt(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
- __func__, err);
- goto err_close_channels;
- }
-
- err = mlx5e_open_tirs(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
- __func__, err);
- goto err_close_rqls;
- }
-
- err = mlx5e_open_flow_table(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
- __func__, err);
- goto err_close_tirs;
- }
-
- err = mlx5e_add_all_vlan_rules(priv);
- if (err) {
- netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
- __func__, err);
- goto err_close_flow_table;
+ goto err_clear_state_opened_flag;
}
- mlx5e_init_eth_addr(priv);
-
- set_bit(MLX5E_STATE_OPENED, &priv->state);
-
mlx5e_update_carrier(priv);
- mlx5e_set_rx_mode_core(priv);
+ mlx5e_redirect_rqts(priv);
schedule_delayed_work(&priv->update_stats_work, 0);
- return 0;
-err_close_flow_table:
- mlx5e_close_flow_table(priv);
-
-err_close_tirs:
- mlx5e_close_tirs(priv);
-
-err_close_rqls:
- mlx5e_close_rqt(priv);
-
-err_close_channels:
- mlx5e_close_channels(priv);
-
-err_close_tises:
- mlx5e_close_tises(priv);
+ return 0;
+err_clear_state_opened_flag:
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
}
@@ -1494,16 +1404,17 @@ int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ /* May already be CLOSED if a previous configuration operation
+ * (e.g. an RX/TX queue size change) involving a close/open sequence failed.
+ */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_set_rx_mode_core(priv);
- mlx5e_del_all_vlan_rules(priv);
+ mlx5e_redirect_rqts(priv);
netif_carrier_off(priv->netdev);
- mlx5e_close_flow_table(priv);
- mlx5e_close_tirs(priv);
- mlx5e_close_rqt(priv);
mlx5e_close_channels(priv);
- mlx5e_close_tises(priv);
return 0;
}
@@ -1520,26 +1431,341 @@ static int mlx5e_close(struct net_device *netdev)
return err;
}
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
- struct mlx5e_params *new_params)
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+ struct mlx5e_rq *rq,
+ struct mlx5e_rq_param *param)
{
- int err = 0;
- int was_opened;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int err;
- WARN_ON(!mutex_is_locked(&priv->state_lock));
+ param->wq.db_numa_node = param->wq.buf_numa_node;
- was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
- priv->params = *new_params;
+ rq->priv = priv;
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+ struct mlx5e_cq *cq,
+ struct mlx5e_cq_param *param)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
+ cq->priv = priv;
+
+ return 0;
+}
+
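+/* The drop RQ never has receive buffers posted, so traffic steered to
+ * it is effectively dropped by the device. It gives the RQTs a valid
+ * destination while the channels are closed.
+ */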
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+ struct mlx5e_cq_param cq_param;
+ struct mlx5e_rq_param rq_param;
+ struct mlx5e_rq *rq = &priv->drop_rq;
+ struct mlx5e_cq *cq = &priv->drop_rq.cq;
+ int err;
+
+ memset(&cq_param, 0, sizeof(cq_param));
+ memset(&rq_param, 0, sizeof(rq_param));
+ mlx5e_build_rx_cq_param(priv, &cq_param);
+ mlx5e_build_rq_param(priv, &rq_param);
+
+ err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, &cq_param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+ if (err)
+ goto err_disable_cq;
+
+ err = mlx5e_enable_rq(rq, &rq_param);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+
+ return err;
+}
+
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+ mlx5e_disable_rq(&priv->drop_rq);
+ mlx5e_destroy_rq(&priv->drop_rq);
+ mlx5e_disable_cq(&priv->drop_rq.cq);
+ mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++) {
+ err = mlx5e_create_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(priv, tc);
return err;
}
+static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+ int tc;
+
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ mlx5e_destroy_tis(priv, tc);
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn[MLX5E_INDIRECTION_RQT]);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key);
+
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ }
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_create_tir(priv, i);
+ if (err)
+ goto err_destroy_tirs;
+ }
+
+ return 0;
+
+err_destroy_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_destroy_tir(priv, i);
+
+ return err;
+}
+
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_destroy_tir(priv, i);
+}
+
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
@@ -1589,20 +1815,26 @@ static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
netdev_features_t changes = features ^ netdev->features;
- struct mlx5e_params new_params;
- bool update_params = false;
mutex_lock(&priv->state_lock);
- new_params = priv->params;
if (changes & NETIF_F_LRO) {
- new_params.lro_en = !!(features & NETIF_F_LRO);
- update_params = true;
+ bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.lro_en = !!(features & NETIF_F_LRO);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+ mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
}
- if (update_params)
- mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1611,17 +1843,16 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_disable_vlan_filter(priv);
}
- mutex_unlock(&priv->state_lock);
-
- return 0;
+ return err;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool was_opened;
int max_mtu;
- int err;
+ int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -1633,8 +1864,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
}
mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(netdev);
+
netdev->mtu = new_mtu;
- err = mlx5e_update_priv_params(priv, &priv->params);
+
+ if (was_opened)
+ err = mlx5e_open_locked(netdev);
+
mutex_unlock(&priv->state_lock);
return err;
@@ -1673,11 +1912,21 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
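+	/* Max inline is bounded by half the BlueFlame register (one BF
+	 * buffer) minus the non-inline part of the TX WQE; the 2 bytes of
+	 * inline_hdr_start are added back since they do carry inline data.
+	 */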
+ int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ return bf_buf_size -
+ sizeof(struct mlx5e_tx_wqe) +
+ 2 /* sizeof(mlx5e_tx_wqe.inline_hdr_start) */;
+}
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- int num_comp_vectors)
+ int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -1691,24 +1940,25 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
- priv->params.rx_hash_log_tbl_sz =
- (order_base_2(num_comp_vectors) >
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
- order_base_2(num_comp_vectors) :
- MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
+ priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
+
+ netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+ sizeof(priv->params.toeplitz_hash_key));
+
+ for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ priv->params.indirection_rqt[i] = i % num_channels;
- priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
priv->mdev = mdev;
priv->netdev = netdev;
- priv->params.num_channels = num_comp_vectors;
- priv->num_tc = priv->params.num_tc;
+ priv->params.num_channels = num_channels;
priv->default_vlan_prio = priv->params.default_vlan_prio;
spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1983,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
- if (priv->num_tc > 1) {
+ if (priv->params.num_tc > 1)
mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
- }
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->watchdog_timeo = 15 * HZ;
@@ -1755,6 +2004,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1798,19 +2048,19 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
- int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int nch = mlx5e_get_max_num_channels(mdev);
int err;
if (mlx5e_check_required_hca_cap(mdev))
return NULL;
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
- mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev_priv(mdev, netdev, nch);
mlx5e_build_netdev(netdev);
netif_carrier_off(netdev);
@@ -1819,43 +2069,95 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
goto err_free_netdev;
}
err = mlx5_core_alloc_pd(mdev, &priv->pdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
goto err_unmap_free_uar;
}
err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
if (err) {
- netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "alloc td failed, %d\n", err);
goto err_dealloc_pd;
}
err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
- netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
- __func__, err);
+ mlx5_core_err(mdev, "create mkey failed, %d\n", err);
goto err_dealloc_transport_domain;
}
- err = register_netdev(netdev);
+ err = mlx5e_create_tises(priv);
if (err) {
- netdev_err(netdev, "%s: register_netdev failed, %d\n",
- __func__, err);
+ mlx5_core_warn(mdev, "create tises failed, %d\n", err);
goto err_destroy_mkey;
}
+ err = mlx5e_open_drop_rq(priv);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_destroy_tises;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+ goto err_close_drop_rq;
+ }
+
+ err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ if (err) {
+ mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
+ goto err_destroy_rqt_indir;
+ }
+
+ err = mlx5e_create_tirs(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
+ goto err_destroy_rqt_single;
+ }
+
+ err = mlx5e_create_flow_tables(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+ goto err_destroy_tirs;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ err = register_netdev(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+ goto err_destroy_flow_tables;
+ }
+
mlx5e_enable_async_events(priv);
+ schedule_work(&priv->set_rx_mode_work);
return priv;
+err_destroy_flow_tables:
+ mlx5e_destroy_flow_tables(priv);
+
+err_destroy_tirs:
+ mlx5e_destroy_tirs(priv);
+
+err_destroy_rqt_single:
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_destroy_rqt_indir:
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+ mlx5e_close_drop_rq(priv);
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mr);
@@ -1879,13 +2181,22 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
struct net_device *netdev = priv->netdev;
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+ schedule_work(&priv->set_rx_mode_work);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
unregister_netdev(netdev);
+ mlx5e_destroy_flow_tables(priv);
+ mlx5e_destroy_tirs(priv);
+ mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_close_drop_rq(priv);
+ mlx5e_destroy_tises(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
- mlx5e_disable_async_events(priv);
- flush_scheduled_work();
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9a9374131f5b..cf0098596e85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -111,10 +111,12 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct iphdr));
ipv6 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
sizeof(struct ipv6hdr));
ipv4 = NULL;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
}
if (get_cqe_lro_tcppsh(cqe))
@@ -149,6 +151,38 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+ __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+ return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ goto csum_none;
+
+ if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if (is_first_ethertype_ip(skb)) {
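+ /* L4 not validated by HW: for frames whose first ethertype is
+ * IPv4/IPv6 the CQE still carries a usable packet checksum, so
+ * fall back to CHECKSUM_COMPLETE.
+ */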
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ rq->stats.csum_sw++;
+ } else {
+ goto csum_none;
+ }
+
+ return;
+
+csum_none:
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+}
+
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -162,20 +196,12 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe);
- skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
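+ /* Report the average segment size of the aggregated burst,
+ * not the fixed LRO WQE size.
+ */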
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
}
- if (likely(netdev->features & NETIF_F_RXCSUM) &&
- (cqe->hds_ip_ext & CQE_L2_OK) &&
- (cqe->hds_ip_ext & CQE_L3_OK) &&
- (cqe->hds_ip_ext & CQE_L4_OK)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb->ip_summed = CHECKSUM_NONE;
- rq->stats.csum_none++;
- }
+ mlx5e_handle_csum(netdev, cqe, rq, skb);
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 03f28f438e55..cd8f85a251d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, 0);
}
}
@@ -106,16 +106,39 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
priv->default_vlan_prio;
int tc = netdev_get_prio_tc_map(dev, up);
- return priv->channel[channel_ix]->tc_to_txq_map[tc];
+ return priv->channeltc_to_txq_map[channel_ix][tc];
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool bf)
{
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ /* Some NIC TX decisions, e.g. loopback, are based on the packet
+ * headers and occur before the data gather.
+ * Therefore these headers must be copied into the WQE.
+ */
+#define MLX5E_MIN_INLINE ETH_HLEN
+
+ if (bf && (skb_headlen(skb) <= sq->max_inline))
+ return skb_headlen(skb);
+
return MLX5E_MIN_INLINE;
}
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
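+	/* Copy the two MAC addresses, splice in the 802.1Q tag from the
+	 * skb metadata, then copy the rest of the inline headers.
+	 */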
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+ int cpy1_sz = 2 * ETH_ALEN;
+ int cpy2_sz = ihs - cpy1_sz;
+
+ skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+ skb_pull_inline(skb, cpy1_sz);
+ vhdr->h_vlan_proto = skb->vlan_proto;
+ vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+ skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+ cpy2_sz);
+ skb_pull_inline(skb, cpy2_sz);
+}
+
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -129,6 +152,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u8 opcode = MLX5_OPCODE_SEND;
dma_addr_t dma_addr = 0;
+ bool bf = false;
u16 headlen;
u16 ds_cnt;
u16 ihs;
@@ -141,6 +165,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
else
sq->stats.csum_offload_none++;
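+ /* When new completions have arrived, refill the BlueFlame budget
+ * only if the SQ has fully drained (cc == pc).
+ */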
+ if (sq->cc != sq->prev_cc) {
+ sq->prev_cc = sq->cc;
+ sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+ }
+
if (skb_is_gso(skb)) {
u32 payload_len;
@@ -153,13 +182,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_packets++;
sq->stats.tso_bytes += payload_len;
} else {
- ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ bf = sq->bf_budget &&
+ !skb->xmit_more &&
+ !skb_shinfo(skb)->nr_frags;
+ ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
ETH_ZLEN);
}
- skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
- skb_pull_inline(skb, ihs);
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+ ihs += VLAN_HLEN;
+ } else {
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+ }
eseg->inline_hdr_sz = cpu_to_be16(ihs);
@@ -225,14 +262,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+ int bf_sz = 0;
+
+ if (bf && sq->uar_bf_map)
+ bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe);
+ mlx5e_tx_notify_hw(sq, wqe, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
+ sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
sq->stats.packets++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a40b96d4c662..713ead583347 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -346,6 +346,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+ eq->cons_index = 0;
err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -381,10 +382,10 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
- eq->irqn = vecidx;
+ eq->irqn = priv->msix_arr[vecidx].vector;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ err = request_irq(eq->irqn, mlx5_msix_handler, 0,
priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -420,12 +421,12 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
+ free_irq(eq->irqn, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
+ synchronize_irq(eq->irqn);
mlx5_buf_free(dev, &eq->buf);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 292d76f2a904..f5deb642d0d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -46,39 +47,113 @@ enum {
enum {
MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
+ MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8,
MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
+ MLX5_HEALTH_SYNDR_EQ_INV = 0xe,
MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
+ MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
};
-static DEFINE_SPINLOCK(health_lock);
-static LIST_HEAD(health_list);
-static struct work_struct health_work;
+enum {
+ MLX5_NIC_IFC_FULL = 0,
+ MLX5_NIC_IFC_DISABLED = 1,
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2
+};
-static void health_care(struct work_struct *work)
+static u8 get_nic_interface(struct mlx5_core_dev *dev)
{
- struct mlx5_core_health *health, *n;
- struct mlx5_core_dev *dev;
- struct mlx5_priv *priv;
- LIST_HEAD(tlist);
+ return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+}
+
+static void trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+ unsigned long flags;
+ u64 vector;
- spin_lock_irq(&health_lock);
- list_splice_init(&health_list, &tlist);
+ /* wait for pending handlers to complete */
+ synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
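+ /* Bits clear in cmd.bitmask mark command slots currently in use;
+ * build a completion vector covering all of them.
+ */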
+ vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+ if (!vector)
+ goto no_trig;
+
+ vector |= MLX5_TRIGGERED_CMD_COMP;
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+ mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+ mlx5_cmd_comp_handler(dev, vector);
+ return;
+
+no_trig:
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
+
+static int in_fatal(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct health_buffer __iomem *h = health->health;
- spin_unlock_irq(&health_lock);
+ if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ return 1;
- list_for_each_entry_safe(health, n, &tlist, list) {
- priv = container_of(health, struct mlx5_priv, health);
- dev = container_of(priv, struct mlx5_core_dev, priv);
- mlx5_core_warn(dev, "handling bad device here\n");
- /* nothing yet */
- spin_lock_irq(&health_lock);
- list_del_init(&health->list);
- spin_unlock_irq(&health_lock);
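+ /* An all-ones read of fw_ver usually means the device has fallen
+ * off the bus (failed PCI reads return 0xffffffff).
+ */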
+ if (ioread32be(&h->fw_ver) == 0xffffffff)
+ return 1;
+
+ return 0;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return;
+
+ mlx5_core_err(dev, "start\n");
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+
+ mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+ mlx5_core_err(dev, "end\n");
+}
+
+static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
+{
+ u8 nic_interface = get_nic_interface(dev);
+
+ switch (nic_interface) {
+ case MLX5_NIC_IFC_FULL:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
+ break;
+
+ case MLX5_NIC_IFC_DISABLED:
+ mlx5_core_warn(dev, "starting teardown\n");
+ break;
+
+ case MLX5_NIC_IFC_NO_DRAM_NIC:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
+ break;
+ default:
+ mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
+ nic_interface);
}
+
+ mlx5_disable_device(dev);
+}
+
+static void health_care(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+
+ health = container_of(work, struct mlx5_core_health, work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+ mlx5_core_warn(dev, "handling bad device here\n");
+ mlx5_handle_bad_state(dev);
}
static const char *hsynd_str(u8 synd)
@@ -88,6 +163,8 @@ static const char *hsynd_str(u8 synd)
return "firmware internal error";
case MLX5_HEALTH_SYNDR_IRISC_ERR:
return "irisc not responding";
+ case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
+ return "unrecoverable hardware error";
case MLX5_HEALTH_SYNDR_CRC_ERR:
return "firmware CRC error";
case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
@@ -98,48 +175,81 @@ static const char *hsynd_str(u8 synd)
return "async EQ buffer overrun";
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
+ case MLX5_HEALTH_SYNDR_EQ_INV:
+ return "Invalid EQ refrenced";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
+ case MLX5_HEALTH_SYNDR_HIGH_TEMP:
+ return "High temprature";
default:
return "unrecognized error";
}
}
-static u16 read_be16(__be16 __iomem *p)
+static u16 get_maj(u32 fw)
{
- return swab16(readl((__force u16 __iomem *) p));
+ return fw >> 28;
}
-static u32 read_be32(__be32 __iomem *p)
+static u16 get_min(u32 fw)
{
- return swab32(readl((__force u32 __iomem *) p));
+ return fw >> 16 & 0xfff;
+}
+
+static u16 get_sub(u32 fw)
+{
+ return fw & 0xffff;
}
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
+ char fw_str[18];
+ u32 fw;
int i;
+ /* If the syndrome is 0, the device is OK and there is no need to print the buffer */
+ if (!ioread8(&h->synd))
+ return;
+
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
- pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+ dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+
+ dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+ dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+ fw = ioread32be(&h->fw_ver);
+ sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+ dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
+ dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+ dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+ dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+ dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+}
+
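+/* Next poll: one poll interval from now plus up to a second of random
+ * jitter, presumably to avoid lock-step polling across devices.
+ */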
+static unsigned long get_next_poll_jiffies(void)
+{
+ unsigned long next;
- pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
- pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra));
- pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver));
- pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id));
- pr_info("irisc_index %d\n", readb(&h->irisc_index));
- pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
- pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync));
+ get_random_bytes(&next, sizeof(next));
+ next %= HZ;
+ next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+
+ return next;
}
static void poll_health(unsigned long data)
{
struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long next;
u32 count;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ trigger_cmd_completions(dev);
+ mod_timer(&health->timer, get_next_poll_jiffies());
+ return;
+ }
+
count = ioread32be(health->health_counter);
if (count == health->prev)
++health->miss_counter;
@@ -148,18 +258,16 @@ static void poll_health(unsigned long data)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
- mlx5_core_err(dev, "device's health compromised\n");
+ dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
print_health_info(dev);
- spin_lock_irq(&health_lock);
- list_add_tail(&health->list, &health_list);
- spin_unlock_irq(&health_lock);
-
- queue_work(mlx5_core_wq, &health_work);
} else {
- get_random_bytes(&next, sizeof(next));
- next %= HZ;
- next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
- mod_timer(&health->timer, next);
+ mod_timer(&health->timer, get_next_poll_jiffies());
+ }
+
+ if (in_fatal(dev) && !health->sick) {
+ health->sick = true;
+ print_health_info(dev);
+ queue_work(health->wq, &health->work);
}
}
@@ -167,7 +275,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- INIT_LIST_HEAD(&health->list);
init_timer(&health->timer);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -183,18 +290,33 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
del_timer_sync(&health->timer);
-
- spin_lock_irq(&health_lock);
- if (!list_empty(&health->list))
- list_del_init(&health->list);
- spin_unlock_irq(&health_lock);
}
-void mlx5_health_cleanup(void)
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ destroy_workqueue(health->wq);
}
-void __init mlx5_health_init(void)
+int mlx5_health_init(struct mlx5_core_dev *dev)
{
- INIT_WORK(&health_work, health_care);
+ struct mlx5_core_health *health;
+ char *name;
+
+ health = &dev->priv.health;
+ name = kmalloc(64, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strcpy(name, "mlx5_health");
+ strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
+ kfree(name);
+ if (!health->wq)
+ return -ENOMEM;
+
+ INIT_WORK(&health->work, health_care);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..4ac8d4cc4973 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
-#include <asm-generic/kmap_types.h>
+#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -39,12 +39,14 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
@@ -62,7 +64,6 @@ static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
-struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
@@ -152,6 +153,25 @@ static struct mlx5_profile profile[] = {
},
};
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+
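+/* Sleep-poll the FW 'initializing' bit until it clears or the timeout
+ * expires.
+ */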
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+ int err = 0;
+
+ while (fw_initializing(dev)) {
+ if (time_after(jiffies, end)) {
+ err = -EBUSY;
+ break;
+ }
+ msleep(FW_INIT_WAIT_MS);
+ }
+
+ return err;
+}
+
static int set_dma_caps(struct pci_dev *pdev)
{
int err;
@@ -182,6 +202,34 @@ static int set_dma_caps(struct pci_dev *pdev)
return err;
}
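+/* pci_status-guarded enable/disable: idempotent, so normal teardown
+ * and the error-recovery paths can both call these safely.
+ */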
+static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err = 0;
+
+ mutex_lock(&dev->pci_status_mutex);
+ if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
+ err = pci_enable_device(pdev);
+ if (!err)
+ dev->pci_status = MLX5_PCI_STATUS_ENABLED;
+ }
+ mutex_unlock(&dev->pci_status_mutex);
+
+ return err;
+}
+
+static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ mutex_lock(&dev->pci_status_mutex);
+ if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
+ pci_disable_device(pdev);
+ dev->pci_status = MLX5_PCI_STATUS_DISABLED;
+ }
+ mutex_unlock(&dev->pci_status_mutex);
+}
+
static int request_bar(struct pci_dev *pdev)
{
int err = 0;
@@ -391,6 +439,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
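+	/* log_uar_page_sz is encoded relative to 4K, hence PAGE_SHIFT - 12 */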
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
err = set_caps(dev, set_ctx, set_sz);
query_ex:
@@ -455,7 +505,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
struct mlx5_priv *priv = &mdev->priv;
struct msix_entry *msix = priv->msix_arr;
int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
- int numa_node = dev_to_node(&mdev->pdev->dev);
+ int numa_node = priv->numa_node;
int err;
if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -654,12 +704,142 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
}
#endif
-static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+ resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+ resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+ dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+ return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+ if (dev->priv.bf_mapping)
+ io_mapping_free(dev->priv.bf_mapping);
+}
+
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(dev);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ intf->remove(dev, dev_ctx->context);
+ kfree(dev_ctx);
+ return;
+ }
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- int err;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&priv->dev_list, &dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_remove_device(intf, priv);
+ list_del(&intf->list);
+ mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err = 0;
- dev->pdev = pdev;
pci_set_drvdata(dev->pdev, dev);
strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
@@ -668,11 +848,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
INIT_LIST_HEAD(&priv->pgdir_list);
spin_lock_init(&priv->mkey_lock);
+ mutex_init(&priv->alloc_mutex);
+
+ priv->numa_node = dev_to_node(&dev->pdev->dev);
+
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
if (!priv->dbg_root)
return -ENOMEM;
- err = pci_enable_device(pdev);
+ err = mlx5_pci_enable_device(dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
@@ -699,13 +883,61 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
goto err_clr_master;
}
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+err_disable:
+ mlx5_pci_disable_device(dev);
+
+err_dbg:
+ debugfs_remove(priv->dbg_root);
+ return err;
+}
+
+static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ iounmap(dev->iseg);
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+ mlx5_pci_disable_device(dev);
+ debugfs_remove(priv->dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ mutex_lock(&dev->intf_state_mutex);
+ if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+ dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
+ __func__);
+ goto out;
+ }
+
dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
fw_rev_min(dev), fw_rev_sub(dev));
+ /* On load, clear any previous indication of internal error;
+ * the device is coming up.
+ */
+ dev->state = MLX5_DEVICE_STATE_UP;
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
- goto err_unmap;
+ goto out_err;
+ }
+
+ err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
+ FW_INIT_TIMEOUT_MILI);
+ goto out_err;
}
mlx5_pagealloc_init(dev);
@@ -804,10 +1036,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ if (map_bf_area(dev))
+ dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
err = mlx5_irq_set_affinity_hints(dev);
if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_free_comp_eqs;
+ goto err_unmap_bf_area;
}
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -817,9 +1052,32 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev);
+ err = mlx5_register_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+ goto err_reg_dev;
+ }
+
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
+ dev->interface_state = MLX5_INTERFACE_STATE_UP;
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+
return 0;
-err_free_comp_eqs:
+err_reg_dev:
+ mlx5_cleanup_mr_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
+
+err_unmap_bf_area:
+ unmap_bf_area(dev);
+
free_comp_eqs(dev);
err_stop_eqs:
@@ -838,7 +1096,7 @@ err_stop_poll:
mlx5_stop_health_poll(dev);
if (mlx5_cmd_teardown_hca(dev)) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- return err;
+ goto out_err;
}
err_pagealloc_stop:
@@ -854,168 +1112,55 @@ err_pagealloc_cleanup:
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
-err_unmap:
- iounmap(dev->iseg);
-
-err_clr_master:
- pci_clear_master(dev->pdev);
- release_bar(dev->pdev);
-
-err_disable:
- pci_disable_device(dev->pdev);
+out_err:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mutex_unlock(&dev->intf_state_mutex);
-err_dbg:
- debugfs_remove(priv->dbg_root);
return err;
}
-static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
- struct mlx5_priv *priv = &dev->priv;
+ int err = 0;
+ mutex_lock(&dev->intf_state_mutex);
+ if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+ dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
+ __func__);
+ goto out;
+ }
+ mlx5_unregister_device(dev);
+ mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
+ unmap_bf_area(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
mlx5_stop_health_poll(dev);
- if (mlx5_cmd_teardown_hca(dev)) {
+ err = mlx5_cmd_teardown_hca(dev);
+ if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- return;
+ goto out;
}
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
- iounmap(dev->iseg);
- pci_clear_master(dev->pdev);
- release_bar(dev->pdev);
- pci_disable_device(dev->pdev);
- debugfs_remove(priv->dbg_root);
-}
-static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx) {
- pr_warn("mlx5_add_device: alloc context failed\n");
- return;
- }
-
- dev_ctx->intf = intf;
- dev_ctx->context = intf->add(dev);
-
- if (dev_ctx->context) {
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- }
-}
-
-static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf) {
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
-
- intf->remove(dev, dev_ctx->context);
- kfree(dev_ctx);
- return;
- }
-}
-static int mlx5_register_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&priv->dev_list, &dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-static void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_remove_device(intf, priv);
- list_del(&priv->dev_list);
- mutex_unlock(&intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
-
- mutex_lock(&intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&intf_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- mutex_lock(&intf_mutex);
- list_for_each_entry(priv, &dev_list, dev_list)
- mlx5_remove_device(intf, priv);
- list_del(&intf->list);
- mutex_unlock(&intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
+out:
+ dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+ mutex_unlock(&dev->intf_state_mutex);
+ return err;
}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_device_context *dev_ctx;
@@ -1036,7 +1181,6 @@ struct mlx5_core_event_handler {
void *data);
};
-#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -1060,43 +1204,166 @@ static int init_one(struct pci_dev *pdev,
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
+ dev->pdev = pdev;
dev->event = mlx5_core_event;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
- err = mlx5_dev_init(dev, pdev);
+ mutex_init(&dev->pci_status_mutex);
+ mutex_init(&dev->intf_state_mutex);
+ err = mlx5_pci_init(dev, priv);
if (err) {
- dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
- goto out;
+ dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
+ goto clean_dev;
}
- err = mlx5_register_device(dev);
+ err = mlx5_health_init(dev);
if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto out_init;
+ dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
+ goto close_pci;
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
+ err = mlx5_load_one(dev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+ goto clean_health;
+ }
return 0;
-out_init:
- mlx5_dev_cleanup(dev);
-out:
+clean_health:
+ mlx5_health_cleanup(dev);
+close_pci:
+ mlx5_pci_close(dev, priv);
+clean_dev:
+ pci_set_drvdata(pdev, NULL);
kfree(dev);
+
return err;
}
+
static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
- mlx5_unregister_device(dev);
- mlx5_dev_cleanup(dev);
+ if (mlx5_unload_one(dev, priv)) {
+ dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+ mlx5_health_cleanup(dev);
+ return;
+ }
+ mlx5_health_cleanup(dev);
+ mlx5_pci_close(dev, priv);
+ pci_set_drvdata(pdev, NULL);
kfree(dev);
}
+static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_enter_error_state(dev);
+ mlx5_unload_one(dev, priv);
+ mlx5_pci_disable_device(dev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ int err = 0;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
+ err = mlx5_pci_enable_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+ , __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+/* Wait for the device to show vital signs. For now we check
+ * that we can read the device ID and that the health buffer
+ * shows a non-zero value which is different from 0xffffffff
+ */
+static void wait_vital(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_core_health *health = &dev->priv.health;
+ const int niter = 100;
+ u32 count;
+ u16 did;
+ int i;
+
+ /* Wait for firmware to be ready after reset */
+ msleep(1000);
+ for (i = 0; i < niter; i++) {
+ if (pci_read_config_word(pdev, 2, &did)) {
+ dev_warn(&pdev->dev, "failed reading config word\n");
+ break;
+ }
+ if (did == pdev->device) {
+ dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
+ break;
+ }
+ msleep(50);
+ }
+ if (i == niter)
+ dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+
+ for (i = 0; i < niter; i++) {
+ count = ioread32be(health->health_counter);
+ if (count && count != 0xffffffff) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ break;
+ }
+ msleep(50);
+ }
+
+ if (i == niter)
+ dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
+ pci_save_state(pdev);
+ wait_vital(pdev);
+
+ err = mlx5_load_one(dev, priv);
+ if (err)
+ dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
+ , __func__, err);
+ else
+ dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+}
+
+static const struct pci_error_handlers mlx5_err_handler = {
+ .error_detected = mlx5_pci_err_detected,
+ .slot_reset = mlx5_pci_slot_reset,
+ .resume = mlx5_pci_resume
+};
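[All three callbacks are built from mlx5_unload_one()/mlx5_load_one(); because both take intf_state_mutex and check interface_state first, repeated invocations are harmless no-ops. A hypothetical suspend/resume pair, not part of this patch, could reuse the same primitives:

	/* Hypothetical sketch built on the helpers above. */
	static int mlx5_suspend_sketch(struct pci_dev *pdev)
	{
		struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

		return mlx5_unload_one(dev, &dev->priv);
	}

	static int mlx5_resume_sketch(struct pci_dev *pdev)
	{
		struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

		return mlx5_load_one(dev, &dev->priv);
	}
]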
+
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
{ PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
@@ -1113,7 +1380,8 @@ static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
.probe = init_one,
- .remove = remove_one
+ .remove = remove_one,
+ .err_handler = &mlx5_err_handler
};
static int __init init(void)
@@ -1121,16 +1389,10 @@ static int __init init(void)
int err;
mlx5_register_debugfs();
- mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
- if (!mlx5_core_wq) {
- err = -ENOMEM;
- goto err_debug;
- }
- mlx5_health_init();
err = pci_register_driver(&mlx5_core_driver);
if (err)
- goto err_health;
+ goto err_debug;
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_init();
@@ -1138,9 +1400,6 @@ static int __init init(void)
return 0;
-err_health:
- mlx5_health_cleanup();
- destroy_workqueue(mlx5_core_wq);
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -1152,8 +1411,6 @@ static void __exit cleanup(void)
mlx5e_cleanup();
#endif
pci_unregister_driver(&mlx5_core_driver);
- mlx5_health_cleanup();
- destroy_workqueue(mlx5_core_wq);
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc88ecaecb4b..cee5b7a839bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -43,25 +43,25 @@
extern int mlx5_core_debug_mask;
-#define mlx5_core_dbg(dev, format, ...) \
- pr_debug("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_dbg(__dev, format, ...) \
+ dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
- mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
+ mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(dev, format, ...) \
- pr_err("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_err(__dev, format, ...) \
+ dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_warn(dev, format, ...) \
- pr_warn("%s:%s:%d:(pid %d): " format, \
- (dev)->priv.name, __func__, __LINE__, current->pid, \
+#define mlx5_core_warn(__dev, format, ...) \
+ dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
+ (__dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
enum {
@@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
int in_size, u32 *out,
int out_size)
{
- mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ int err;
+
+ err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ if (err)
+ return err;
+
return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
}
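[Callers use this helper to collapse the two failure modes, a transport error from mlx5_cmd_exec() and a non-zero status in the outbox header, into a single errno. A minimal sketch for a fixed-size command, mirroring the transobj.c helpers later in this patch:

	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
]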
@@ -81,6 +86,10 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 1adb300dd850..6fa22b51e460 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -40,6 +40,7 @@ void mlx5_init_mr_table(struct mlx5_core_dev *dev)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
+ memset(table, 0, sizeof(*table));
rwlock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 8a64542abc16..4d3377b12657 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
-#include <asm-generic/kmap_types.h>
+#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
@@ -275,12 +275,36 @@ out_alloc:
return err;
}
+
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+{
+ struct mlx5_manage_pages_inbox *in;
+ struct mlx5_manage_pages_outbox out;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
+ in->func_id = cpu_to_be16(func_id);
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (!err)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+ if (err)
+ mlx5_core_warn(dev, "page notify failed\n");
+
+ kfree(in);
+}
+
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
- struct mlx5_manage_pages_inbox *nin;
int inlen;
u64 addr;
int err;
@@ -289,8 +313,9 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
+ err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
- return -ENOMEM;
+ goto out_free;
}
memset(&out, 0, sizeof(out));
@@ -316,43 +341,29 @@ retry:
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
- goto out_alloc;
+ goto out_4k;
}
dev->priv.fw_pages += npages;
- if (out.hdr.status) {
- err = mlx5_cmd_status_to_err(&out.hdr);
- if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
- func_id, npages, out.hdr.status);
- goto out_alloc;
- }
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ if (err) {
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
+ goto out_4k;
}
mlx5_core_dbg(dev, "err %d\n", err);
- goto out_free;
-
-out_alloc:
- if (notify_fail) {
- nin = kzalloc(sizeof(*nin), GFP_KERNEL);
- if (!nin) {
- mlx5_core_warn(dev, "allocation failed\n");
- goto out_4k;
- }
- memset(&out, 0, sizeof(out));
- nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
- nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
- if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
- mlx5_core_warn(dev, "page notify failed\n");
- kfree(nin);
- }
+ kvfree(in);
+ return 0;
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
kvfree(in);
+ if (notify_fail)
+ page_notify_fail(dev, func_id);
return err;
}
@@ -482,15 +493,20 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
struct fw_page *fwp;
struct rb_node *p;
int nclaimed = 0;
- int err;
+ int err = 0;
do {
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ free_4k(dev, fwp->addr);
+ nclaimed = 1;
+ } else {
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+ }
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 70147999f657..a87e773e93f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -90,16 +90,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{
struct mlx5_reg_pcap in;
struct mlx5_reg_pcap out;
- int err;
memset(&in, 0, sizeof(in));
in.caps_127_96 = cpu_to_be32(caps);
in.port_num = port_num;
- err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
- sizeof(out), MLX5_REG_PCAP, 0, 1);
-
- return err;
+ return mlx5_core_access_reg(dev, &in, sizeof(in), &out,
+ sizeof(out), MLX5_REG_PCAP, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
@@ -107,16 +104,13 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
- err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
- ptys_size, MLX5_REG_PTYS, 0, 0);
-
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
@@ -199,7 +193,6 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
memset(in, 0, sizeof(in));
@@ -210,28 +203,30 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
else
MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PTYS, 0, 1);
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
MLX5_SET(paos_reg, in, ase, 1);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
@@ -239,14 +234,17 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
memset(in, 0, sizeof(in));
+ MLX5_SET(paos_reg, in, local_port, 1);
+
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 0);
if (err)
return err;
- *status = MLX5_GET(paos_reg, out, oper_status);
- return err;
+ *status = MLX5_GET(paos_reg, out, admin_status);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
int *max_mtu, int *oper_mtu, u8 port)
@@ -302,15 +300,12 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
- int err;
memset(in, 0, sizeof(in));
- MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(pvlc_reg, in, local_port, local_port);
- err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
- pvlc_size, MLX5_REG_PVLC, 0, 0);
-
- return err;
+ return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
}
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -328,3 +323,43 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+ MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ if (rx_pause)
+ *rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+ if (tx_pause)
+ *tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
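[A sketch of how an ethtool set_pauseparam handler might drive the two new exports; the surrounding ethtool plumbing is assumed:

	u32 rx_pause, tx_pause;
	int err;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
	if (err)
		return err;

	/* e.g. enable TX pause while preserving the current RX setting */
	return mlx5_set_port_pause(mdev, rx_pause, 1);
]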
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 8b494b562263..30e2ba3f5f16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -345,6 +345,7 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
mlx5_qp_debugfs_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index c48f504ccbeb..ffada801976b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -531,6 +531,7 @@ void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
+ memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 8d98b03026d5..d7068f54e800 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -163,9 +163,21 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+
+ MLX5_SET(modify_tir_in, in, tirn, tirn);
+ MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
memset(in, 0, sizeof(in));
@@ -194,7 +206,7 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_out)];
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
memset(in, 0, sizeof(in));
@@ -358,3 +370,44 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ int err;
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ return err;
+}
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
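[The three RQT helpers follow the driver's usual create/modify/destroy pattern; a sketch, with the create_rqt_in mailbox (in/inlen) assumed to be built by the caller from the mlx5_ifc layout:

	u32 rqtn;
	int err;

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqtn);
	if (err)
		return err;

	/* ... point a TIR at rqtn; later redirect it via mlx5_core_modify_rqt() ... */

	mlx5_core_destroy_rqt(mdev, rqtn);
]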
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index f9ef244710d5..74cae51436e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn);
@@ -61,4 +63,10 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 9ef85873ceea..eb05c845ece9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
goto err_free_uar;
}
+ if (mdev->priv.bf_mapping)
+ uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+ uar->index << PAGE_SHIFT);
+
return 0;
err_free_uar:
@@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
+ io_mapping_unmap(uar->bf_map);
iounmap(uar->map);
mlx5_cmd_free_uar(mdev, uar->index);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 8388411582cf..ce21ee5b2357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
wq->sz_m1 = (1 << wq->log_sz) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
goto err_db_free;
@@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
- err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e0ddd69fb429..6c2a8f95093c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
struct mlx5_wq_param {
int linear;
- int numa;
+ int buf_numa_node;
+ int db_numa_node;
};
struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 000000000000..e36e12219c9b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,43 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+ tristate "Mellanox Technologies Switch ASICs support"
+ ---help---
+ This driver supports the Mellanox Technologies Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_core.
+
+config MLXSW_PCI
+ tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+ depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+ default m
+ ---help---
+ This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+ tristate "Mellanox Technologies SwitchX-2 support"
+ depends on MLXSW_CORE && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports Mellanox Technologies SwitchX-2 Ethernet
+ Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_switchx2.
+
+config MLXSW_SPECTRUM
+ tristate "Mellanox Technologies Spectrum support"
+ depends on MLXSW_CORE && NET_SWITCHDEV
+ default m
+ ---help---
+ This driver supports Mellanox Technologies Spectrum Ethernet
+ Switch ASICs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 000000000000..af015818fd19
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
+mlxsw_core-objs := core.o
+obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
+mlxsw_pci-objs := pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
+mlxsw_switchx2-objs := switchx2.o
+obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
+mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
+ spectrum_switchdev.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 000000000000..cd63b8263688
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1115 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE 4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+ return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+ kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+ memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
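[Typical mailbox lifecycle around a command, as a sketch using the helpers above and the QUERY_FW wrapper defined later in this header:

	char *mbox;
	int err;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	/* ... on success, parse fields out of mbox ... */

	mlxsw_cmd_mbox_free(mbox);
	return err;
]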
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod, char *in_mbox,
+ size_t in_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod,
+ bool out_mbox_direct,
+ char *out_mbox, size_t out_mbox_size)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+ out_mbox_direct, NULL, 0,
+ out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+ u8 opcode_mod, u32 in_mod)
+{
+ return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+ NULL, 0, NULL, 0);
+}
+
+enum mlxsw_cmd_opcode {
+ MLXSW_CMD_OPCODE_QUERY_FW = 0x004,
+ MLXSW_CMD_OPCODE_QUERY_BOARDINFO = 0x006,
+ MLXSW_CMD_OPCODE_QUERY_AQ_CAP = 0x003,
+ MLXSW_CMD_OPCODE_MAP_FA = 0xFFF,
+ MLXSW_CMD_OPCODE_UNMAP_FA = 0xFFE,
+ MLXSW_CMD_OPCODE_CONFIG_PROFILE = 0x100,
+ MLXSW_CMD_OPCODE_ACCESS_REG = 0x040,
+ MLXSW_CMD_OPCODE_SW2HW_DQ = 0x201,
+ MLXSW_CMD_OPCODE_HW2SW_DQ = 0x202,
+ MLXSW_CMD_OPCODE_2ERR_DQ = 0x01E,
+ MLXSW_CMD_OPCODE_QUERY_DQ = 0x022,
+ MLXSW_CMD_OPCODE_SW2HW_CQ = 0x016,
+ MLXSW_CMD_OPCODE_HW2SW_CQ = 0x017,
+ MLXSW_CMD_OPCODE_QUERY_CQ = 0x018,
+ MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
+ MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
+ MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+ switch (opcode) {
+ case MLXSW_CMD_OPCODE_QUERY_FW:
+ return "QUERY_FW";
+ case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+ return "QUERY_BOARDINFO";
+ case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+ return "QUERY_AQ_CAP";
+ case MLXSW_CMD_OPCODE_MAP_FA:
+ return "MAP_FA";
+ case MLXSW_CMD_OPCODE_UNMAP_FA:
+ return "UNMAP_FA";
+ case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+ return "CONFIG_PROFILE";
+ case MLXSW_CMD_OPCODE_ACCESS_REG:
+ return "ACCESS_REG";
+ case MLXSW_CMD_OPCODE_SW2HW_DQ:
+ return "SW2HW_DQ";
+ case MLXSW_CMD_OPCODE_HW2SW_DQ:
+ return "HW2SW_DQ";
+ case MLXSW_CMD_OPCODE_2ERR_DQ:
+ return "2ERR_DQ";
+ case MLXSW_CMD_OPCODE_QUERY_DQ:
+ return "QUERY_DQ";
+ case MLXSW_CMD_OPCODE_SW2HW_CQ:
+ return "SW2HW_CQ";
+ case MLXSW_CMD_OPCODE_HW2SW_CQ:
+ return "HW2SW_CQ";
+ case MLXSW_CMD_OPCODE_QUERY_CQ:
+ return "QUERY_CQ";
+ case MLXSW_CMD_OPCODE_SW2HW_EQ:
+ return "SW2HW_EQ";
+ case MLXSW_CMD_OPCODE_HW2SW_EQ:
+ return "HW2SW_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_EQ:
+ return "QUERY_EQ";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum mlxsw_cmd_status {
+ /* Command execution succeeded. */
+ MLXSW_CMD_STATUS_OK = 0x00,
+ /* Internal error (e.g. bus error) occurred while processing command. */
+ MLXSW_CMD_STATUS_INTERNAL_ERR = 0x01,
+ /* Operation/command not supported or opcode modifier not supported. */
+ MLXSW_CMD_STATUS_BAD_OP = 0x02,
+ /* Parameter not supported, parameter out of range. */
+ MLXSW_CMD_STATUS_BAD_PARAM = 0x03,
+ /* System was not enabled or bad system state. */
+ MLXSW_CMD_STATUS_BAD_SYS_STATE = 0x04,
+ /* Attempt to access reserved or unallocated resource, or resource in
+ * inappropriate ownership.
+ */
+ MLXSW_CMD_STATUS_BAD_RESOURCE = 0x05,
+ /* Requested resource is currently executing a command. */
+ MLXSW_CMD_STATUS_RESOURCE_BUSY = 0x06,
+ /* Required capability exceeds device limits. */
+ MLXSW_CMD_STATUS_EXCEED_LIM = 0x08,
+ /* Resource is not in the appropriate state or ownership. */
+ MLXSW_CMD_STATUS_BAD_RES_STATE = 0x09,
+ /* Index out of range (might be beyond table size or attempt to
+ * access a reserved resource).
+ */
+ MLXSW_CMD_STATUS_BAD_INDEX = 0x0A,
+ /* NVMEM checksum/CRC failed. */
+ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B,
+ /* Bad management packet (silently discarded). */
+ MLXSW_CMD_STATUS_BAD_PKT = 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_CMD_STATUS_OK:
+ return "OK";
+ case MLXSW_CMD_STATUS_INTERNAL_ERR:
+ return "INTERNAL_ERR";
+ case MLXSW_CMD_STATUS_BAD_OP:
+ return "BAD_OP";
+ case MLXSW_CMD_STATUS_BAD_PARAM:
+ return "BAD_PARAM";
+ case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+ return "BAD_SYS_STATE";
+ case MLXSW_CMD_STATUS_BAD_RESOURCE:
+ return "BAD_RESOURCE";
+ case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+ return "RESOURCE_BUSY";
+ case MLXSW_CMD_STATUS_EXCEED_LIM:
+ return "EXCEED_LIM";
+ case MLXSW_CMD_STATUS_BAD_RES_STATE:
+ return "BAD_RES_STATE";
+ case MLXSW_CMD_STATUS_BAD_INDEX:
+ return "BAD_INDEX";
+ case MLXSW_CMD_STATUS_BAD_NVMEM:
+ return "BAD_NVMEM";
+ case MLXSW_CMD_STATUS_BAD_PKT:
+ return "BAD_PKT";
+ default:
+ return "*UNKNOWN*";
+ }
+}
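[The strings above are diagnostic only; a hypothetical helper translating a subset of statuses into errno values (the real mapping is left to the bus/core code) might look like:

	static int mlxsw_cmd_status_to_errno_sketch(u8 status)
	{
		switch (status) {
		case MLXSW_CMD_STATUS_OK:
			return 0;
		case MLXSW_CMD_STATUS_BAD_PARAM:
		case MLXSW_CMD_STATUS_BAD_INDEX:
			return -EINVAL;
		case MLXSW_CMD_STATUS_RESOURCE_BUSY:
			return -EBUSY;
		case MLXSW_CMD_STATUS_EXCEED_LIM:
			return -ENOBUFS;
		default:
			return -EIO;
		}
	}
]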
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is made to the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports. Offset
+ * from the error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
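[Assuming the MLXSW_ITEM32()/MLXSW_ITEM64() macros in item.h generate mlxsw_cmd_mbox_query_fw_<field>_get() accessors (the usual convention for such item definitions), the firmware revision can be pulled out of a filled mailbox like so, dev being the underlying struct device:

	/* Accessor names assumed from the item.h naming convention. */
	u32 maj = mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	u32 min = mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	u32 sub = mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	dev_info(dev, "fw version %u.%u.%u\n", maj, min, sub);
]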
+
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+ char *out_mbox)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+ 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum number of S/G list elements in an SDQ. An SDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum number of S/G list elements in an RDQ. An RDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 vpm_entries_count)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+ 0, vpm_entries_count,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
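[Putting the two indexed items together, a sketch of mapping a single 4KB chunk at index 0; the indexed setters are assumed to take (buf, index, value) per the item.h convention, and dma_addr is a caller-obtained, pinned DMA address:

	mlxsw_cmd_mbox_zero(in_mbox);
	mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, 0, dma_addr);
	mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, 0, 0); /* 2^0 = one 4KB page */

	err = mlxsw_cmd_map_fa(mlxsw_core, in_mbox, 1);	/* one VPM entry */
]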
+
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be performed prior to executing
+ * the MAP_FA command.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an
+ * indeterminate state; a software reset of the device is therefore
+ * required following an unsuccessful completion of the command. A
+ * software reset is also required to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+ char *in_mbox)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+ 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
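[Every configurable value below is paired with a set_* capability bit, so changing a field means writing both; a sketch for max_lag, with setter names assumed from the item.h convention:

	mlxsw_cmd_mbox_zero(in_mbox);
	mlxsw_cmd_mbox_config_profile_set_max_lag_set(in_mbox, 1);
	mlxsw_cmd_mbox_config_profile_max_lag_set(in_mbox, 64);

	err = mlxsw_cmd_config_profile_set(mlxsw_core, in_mbox);
]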
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of single-entry flooding tables. Different flooding tables
+ * can be associated with different packet types.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_flood_mode
+ * Flooding mode to use.
+ * 0-2 - Backward compatible modes for SwitchX devices.
+ * 3 - Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset tables.
+ * max_fid_flood_tables indicates the number of per-FID tables.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_fid_offset_flood_tables
+ * Maximum number of FID-offset flooding tables.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ max_fid_offset_flood_tables, 0x34, 24, 4);
+
+/* cmd_mbox_config_profile_fid_offset_flood_table_size
+ * The size (number of entries) of each FID-offset flood table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+ fid_offset_flood_table_size, 0x34, 0, 16);
+
+/* cmd_mbox_config_profile_max_fid_flood_tables
+ * Maximum number of per-FID flooding tables.
+ *
+ * Note: These flooding tables cover special FIDs only (vFIDs), from FID
+ * value 4K upwards.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4);
+
+/* cmd_mbox_config_profile_fid_flood_table_size
+ * The size (number of entries) of each per-FID table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16);
+
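+/* A minimal usage sketch for the mixed flooding mode described above. The
+ * accessors are the ones generated by the MLXSW_ITEM32() definitions in
+ * this file; the table counts are arbitrary example values, and in_mbox is
+ * assumed to come from mlxsw_cmd_mbox_alloc().
+ *
+ *	mlxsw_cmd_mbox_config_profile_set_flood_tables_set(in_mbox, 1);
+ *	mlxsw_cmd_mbox_config_profile_flood_mode_set(in_mbox, 3);
+ *	mlxsw_cmd_mbox_config_profile_max_flood_tables_set(in_mbox, 2);
+ *	mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(in_mbox, 1);
+ *	mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(in_mbox, 2);
+ *	mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(in_mbox, 4096);
+ */
+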
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB, in chunks of 512 records, per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the
+ * configuration values for the Switch Partition are taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+ 0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+ 0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+ 0x60, 0, 8, 0x08, 0x00, false);
+
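+/* A short sketch of the indexed accessors above, marking SWID 0 as an
+ * Ethernet partition. Bit 0 of the mask selects "set type" per the
+ * swid_config_mask description; type 2 (0010) is Ethernet. in_mbox is an
+ * assumed CONFIG_PROFILE mailbox.
+ *
+ *	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(in_mbox, 0, BIT(0));
+ *	mlxsw_cmd_mbox_config_profile_swid_config_type_set(in_mbox, 0, 2);
+ */
+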
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, char *out_mbox)
+{
+ return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+ 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+ opcode_mod, dq_number,
+ in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+ MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+ MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
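+/* A usage sketch for the mailbox fields above, handing SDQ 0 to hardware.
+ * The CQ number and traffic class are example values; a log2_dq_sz of 0
+ * means a single 4KB page, and mem_addr stands for an assumed DMA mapping
+ * of the descriptor queue memory.
+ *
+ *	char *in_mbox = mlxsw_cmd_mbox_alloc();
+ *
+ *	mlxsw_cmd_mbox_sw2hw_dq_cq_set(in_mbox, 0);
+ *	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(in_mbox, 3);
+ *	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(in_mbox, 0);
+ *	mlxsw_cmd_mbox_sw2hw_dq_pa_set(in_mbox, 0, mem_addr);
+ *	err = mlxsw_cmd_sw2hw_sdq(mlxsw_core, in_mbox, 0);
+ *	mlxsw_cmd_mbox_free(in_mbox);
+ */
+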
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded,
+ * and SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from
+ * whatever state it was in. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number, u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+ opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+ u32 dq_number)
+{
+ return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number,
+ u8 opcode_mod)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+ opcode_mod, dq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 dq_number)
+{
+ return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+ MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+ 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled. In that case, CQ consumer
+ * counter update (poll for completion) and Request completion
+ * notification (Arm CQ) DoorBells should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
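+/* A usage sketch for the fields above, taking CQ 0 to hardware ownership.
+ * A log_cq_size of 4 gives a 16-entry CQ; c_eqn selects EQ 1 for
+ * completion events; mem_addr stands for an assumed DMA mapping.
+ *
+ *	mlxsw_cmd_mbox_sw2hw_cq_cv_set(in_mbox, 0);
+ *	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(in_mbox, 1);
+ *	mlxsw_cmd_mbox_sw2hw_cq_st_set(in_mbox, 0);
+ *	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(in_mbox, 4);
+ *	mlxsw_cmd_mbox_sw2hw_cq_pa_set(in_mbox, 0, mem_addr);
+ *	err = mlxsw_cmd_sw2hw_cq(mlxsw_core, in_mbox, 0);
+ */
+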
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+ u32 cq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+ 0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 cq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+ 0, cq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+ char *in_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+ 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
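+/* The EQ counterpart of the CQ sketch above: EQ 0 delivering MSI-X, armed
+ * for notification, with a 16-entry queue. mem_addr again stands for an
+ * assumed DMA mapping.
+ *
+ *	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(in_mbox, 1);
+ *	mlxsw_cmd_mbox_sw2hw_eq_st_set(in_mbox, 1);
+ *	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(in_mbox, 4);
+ *	mlxsw_cmd_mbox_sw2hw_eq_pa_set(in_mbox, 0, mem_addr);
+ *	err = mlxsw_cmd_sw2hw_eq(mlxsw_core, in_mbox, 0);
+ */
+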
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+ u32 eq_number)
+{
+ return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+ 0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, u32 eq_number)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+ 0, eq_number, false,
+ out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 000000000000..97f0d93caf99
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,1299 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+ u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
+ u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+ u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
+ u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+ struct u64_stats_sync syncp;
+ u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+ u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+ u32 trap_rx_invalid;
+ u32 port_rx_invalid;
+};
+
+struct mlxsw_core {
+ struct mlxsw_driver *driver;
+ const struct mlxsw_bus *bus;
+ void *bus_priv;
+ const struct mlxsw_bus_info *bus_info;
+ struct list_head rx_listener_list;
+ struct list_head event_listener_list;
+ struct {
+ struct sk_buff *resp_skb;
+ u64 tid;
+ wait_queue_head_t wait;
+ bool trans_active;
+ struct mutex lock; /* One EMAD transaction at a time. */
+ bool use_emad;
+ } emad;
+ struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+ struct dentry *dbg_dir;
+ struct {
+ struct debugfs_blob_wrapper vsd_blob;
+ struct debugfs_blob_wrapper psid_blob;
+ } dbg;
+ unsigned long driver_priv[0];
+ /* driver_priv has to be always the last item */
+};
+
+struct mlxsw_rx_listener_item {
+ struct list_head list;
+ struct mlxsw_rx_listener rxl;
+ void *priv;
+};
+
+struct mlxsw_event_listener_item {
+ struct list_head list;
+ struct mlxsw_event_listener el;
+ void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the operation TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
+
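+/* Putting the TLVs together: a request EMAD, as assembled back to front by
+ * mlxsw_emad_construct() below, is laid out as follows (lengths in u32
+ * words, matching the len fields above):
+ *
+ *	Ethernet header | OP TLV (4) | REG TLV (1 + register payload) | END TLV (1)
+ */
+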
+enum mlxsw_core_reg_access_type {
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+ switch (type) {
+ case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+ return "query";
+ case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+ return "write";
+ }
+ BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+ mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+ mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+ const struct mlxsw_reg_info *reg,
+ char *payload)
+{
+ mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+ mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+ memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+ const struct mlxsw_reg_info *reg,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+ mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+ mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+ mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+ mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+ if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+ else
+ mlxsw_emad_op_tlv_method_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+ mlxsw_emad_op_tlv_class_set(op_tlv,
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+ char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+ mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+ mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+ mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+ mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+ mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+ skb_reset_mac_header(skb);
+
+ return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_core *mlxsw_core)
+{
+ char *buf;
+
+ buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_end_tlv(buf);
+
+ buf = skb_push(skb, reg->len + sizeof(u32));
+ mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+ buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+ mlxsw_emad_construct_eth_hdr(skb);
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+ return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+ MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+ return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+ char *op_tlv;
+
+ op_tlv = mlxsw_emad_op_tlv(skb);
+ return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ int err;
+ int ret;
+
+ mlxsw_core->emad.trans_active = true;
+
+ err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+ if (err) {
+ dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ dev_kfree_skb(skb);
+ goto trans_inactive_out;
+ }
+
+ ret = wait_event_timeout(mlxsw_core->emad.wait,
+ !(mlxsw_core->emad.trans_active),
+ msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+ if (!ret) {
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ err = -EIO;
+ goto trans_inactive_out;
+ }
+
+ return 0;
+
+trans_inactive_out:
+ mlxsw_core->emad.trans_active = false;
+ return err;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+ char *op_tlv)
+{
+ enum mlxsw_emad_op_tlv_status status;
+ u64 tid;
+
+ status = mlxsw_emad_op_tlv_status_get(op_tlv);
+ tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return 0;
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EAGAIN;
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ default:
+ dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+ tid, status, mlxsw_emad_op_tlv_status_str(status));
+ return -EIO;
+ }
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb)
+{
+ return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct sk_buff *trans_skb;
+ int n_retry;
+ int err;
+
+ n_retry = 0;
+retry:
+ /* We copy the EMAD to a new skb, since we might need
+ * to retransmit it in case of failure.
+ */
+ trans_skb = skb_copy(skb, GFP_KERNEL);
+ if (!trans_skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+ if (!err) {
+ struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+ err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+ if (err)
+ dev_kfree_skb(resp_skb);
+ if (!err || err != -EAGAIN)
+ goto out;
+ }
+ if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+
+out:
+ dev_kfree_skb(skb);
+ mlxsw_core->emad.tid++;
+ return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ if (mlxsw_emad_is_resp(skb) &&
+ mlxsw_core->emad.trans_active &&
+ mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+ mlxsw_core->emad.resp_skb = skb;
+ mlxsw_core->emad.trans_active = false;
+ wake_up(&mlxsw_core->emad.wait);
+ } else {
+ dev_kfree_skb(skb);
+ }
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+ .func = mlxsw_emad_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_TRAP_ID_ETHEMAD);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ /* Set the upper 32 bits of the transaction ID field to a random
+ * number. This allows us to discard EMADs addressed to other
+ * devices.
+ */
+ get_random_bytes(&mlxsw_core->emad.tid, 4);
+ mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+
+ init_waitqueue_head(&mlxsw_core->emad.wait);
+ mlxsw_core->emad.trans_active = false;
+ mutex_init(&mlxsw_core->emad.lock);
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ if (err)
+ return err;
+
+ err = mlxsw_emad_traps_set(mlxsw_core);
+ if (err)
+ goto err_emad_trap_set;
+
+ mlxsw_core->emad.use_emad = true;
+
+ return 0;
+
+err_emad_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+ return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_core->emad.use_emad = false;
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_TRAP_ID_ETHEMAD);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_core,
+ &mlxsw_emad_rx_listener,
+ mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+ u16 reg_len)
+{
+ struct sk_buff *skb;
+ u16 emad_len;
+
+ emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+ (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+ sizeof(u32) + mlxsw_core->driver->txhdr_len);
+ if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+ return NULL;
+
+ skb = netdev_alloc_skb(NULL, emad_len);
+ if (!skb)
+ return NULL;
+ memset(skb->data, 0, emad_len);
+ skb_reserve(skb, emad_len);
+
+ return skb;
+}
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_core *mlxsw_core = file->private;
+ struct mlxsw_core_pcpu_stats *p;
+ u64 rx_packets, rx_bytes;
+ u64 tmp_rx_packets, tmp_rx_bytes;
+ u32 rx_dropped, rx_invalid;
+ unsigned int start;
+ int i;
+ int j;
+ static const char hdr[] =
+ " NUM RX_PACKETS RX_BYTES RX_DROPPED\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->trap_rx_packets[i];
+ tmp_rx_bytes = p->trap_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->trap_rx_dropped[i];
+ }
+ seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->trap_rx_invalid;
+ }
+ seq_printf(file, "trap INV %10u\n",
+ rx_invalid);
+
+ for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+ rx_packets = 0;
+ rx_bytes = 0;
+ rx_dropped = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ tmp_rx_packets = p->port_rx_packets[i];
+ tmp_rx_bytes = p->port_rx_bytes[i];
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ rx_packets += tmp_rx_packets;
+ rx_bytes += tmp_rx_bytes;
+ rx_dropped += p->port_rx_dropped[i];
+ }
+ seq_printf(file, "port %3d %12llu %12llu %10u\n",
+ i, rx_packets, rx_bytes, rx_dropped);
+ }
+ rx_invalid = 0;
+ for_each_possible_cpu(j) {
+ p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+ rx_invalid += p->port_rx_invalid;
+ }
+ seq_printf(file, "port INV %10u\n",
+ rx_invalid);
+ return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+ struct mlxsw_core *mlxsw_core = inode->i_private;
+
+ return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = mlxsw_core_rx_stats_dbg_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+ spin_lock(&mlxsw_core_driver_list_lock);
+ list_del(&mlxsw_driver->list);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+ if (strcmp(mlxsw_driver->kind, kind) == 0)
+ return mlxsw_driver;
+ }
+ return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ if (!mlxsw_driver) {
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ }
+ if (mlxsw_driver) {
+ if (!try_module_get(mlxsw_driver->owner))
+ mlxsw_driver = NULL;
+ }
+
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+ struct mlxsw_driver *mlxsw_driver;
+
+ spin_lock(&mlxsw_core_driver_list_lock);
+ mlxsw_driver = __driver_find(kind);
+ spin_unlock(&mlxsw_core_driver_list_lock);
+ if (!mlxsw_driver)
+ return;
+ module_put(mlxsw_driver->owner);
+}
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+ const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+ mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+ mlxsw_core_dbg_root);
+ if (!mlxsw_core->dbg_dir)
+ return -ENOMEM;
+ debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+ mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+ mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+ mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+ debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.vsd_blob);
+ mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+ mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+ debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+ &mlxsw_core->dbg.psid_blob);
+ return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+ debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv)
+{
+ const char *device_kind = mlxsw_bus_info->device_kind;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_driver *mlxsw_driver;
+ size_t alloc_size;
+ int err;
+
+ mlxsw_driver = mlxsw_core_driver_get(device_kind);
+ if (!mlxsw_driver)
+ return -EINVAL;
+ alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+ mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_core) {
+ err = -ENOMEM;
+ goto err_core_alloc;
+ }
+
+ INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+ INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+ mlxsw_core->driver = mlxsw_driver;
+ mlxsw_core->bus = mlxsw_bus;
+ mlxsw_core->bus_priv = bus_priv;
+ mlxsw_core->bus_info = mlxsw_bus_info;
+
+ mlxsw_core->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+ if (!mlxsw_core->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+ if (err)
+ goto err_bus_init;
+
+ err = mlxsw_emad_init(mlxsw_core);
+ if (err)
+ goto err_emad_init;
+
+ err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+ mlxsw_bus_info);
+ if (err)
+ goto err_driver_init;
+
+ err = mlxsw_core_debugfs_init(mlxsw_core);
+ if (err)
+ goto err_debugfs_init;
+
+ return 0;
+
+err_debugfs_init:
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+ mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
+ free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+ kfree(mlxsw_core);
+err_core_alloc:
+ mlxsw_core_driver_put(device_kind);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+ const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+ mlxsw_core_debugfs_fini(mlxsw_core);
+ mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_emad_fini(mlxsw_core);
+ mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ free_percpu(mlxsw_core->pcpu_stats);
+ kfree(mlxsw_core);
+ mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+ return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+ return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+ tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+ const struct mlxsw_rx_listener *rxl_b)
+{
+ return (rxl_a->func == rxl_b->func &&
+ rxl_a->local_port == rxl_b->local_port &&
+ rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+ rxl_item->priv == priv)
+ return rxl_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (rxl_item)
+ return -EEXIST;
+ rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+ if (!rxl_item)
+ return -ENOMEM;
+ rxl_item->rxl = *rxl;
+ rxl_item->priv = priv;
+
+ list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ if (!rxl_item)
+ return;
+ list_del_rcu(&rxl_item->list);
+ synchronize_rcu();
+ kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *event_listener_item = priv;
+ struct mlxsw_reg_info reg;
+ char *payload;
+ char *op_tlv = mlxsw_emad_op_tlv(skb);
+ char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+ reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+ reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+ payload = mlxsw_emad_reg_payload(op_tlv);
+ event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+ dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+ const struct mlxsw_event_listener *el_b)
+{
+ return (el_a->func == el_b->func &&
+ el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+
+ list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+ if (__is_event_listener_equal(&el_item->el, el) &&
+ el_item->priv == priv)
+ return el_item;
+ }
+ return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ int err;
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (el_item)
+ return -EEXIST;
+ el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+ if (!el_item)
+ return -ENOMEM;
+ el_item->el = *el;
+ el_item->priv = priv;
+
+ err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+ if (err)
+ goto err_rx_listener_register;
+
+ /* No reason to save item if we did not manage to register an RX
+ * listener for it.
+ */
+ list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+ return 0;
+
+err_rx_listener_register:
+ kfree(el_item);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv)
+{
+ struct mlxsw_event_listener_item *el_item;
+ const struct mlxsw_rx_listener rxl = {
+ .func = mlxsw_core_event_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = el->trap_id,
+ };
+
+ el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ if (!el_item)
+ return;
+ mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+ list_del(&el_item->list);
+ kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err;
+ char *op_tlv;
+ struct sk_buff *skb;
+ struct mlxsw_tx_info tx_info = {
+ .local_port = MLXSW_PORT_CPU_PORT,
+ .is_emad = true,
+ };
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ if (!skb)
+ return -ENOMEM;
+
+ mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+ mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+ mlxsw_core->emad.tid);
+ mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+ err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+ if (!err) {
+ op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+ memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+ reg->len);
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+ mlxsw_core->emad.tid - 1);
+ mlxsw_core_buf_dump_dbg(mlxsw_core,
+ mlxsw_core->emad.resp_skb->data,
+ mlxsw_core->emad.resp_skb->len);
+
+ dev_kfree_skb(mlxsw_core->emad.resp_skb);
+ }
+
+ return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ int err, n_retry;
+ char *in_mbox, *out_mbox, *tmp;
+
+ in_mbox = mlxsw_cmd_mbox_alloc();
+ if (!in_mbox)
+ return -ENOMEM;
+
+ out_mbox = mlxsw_cmd_mbox_alloc();
+ if (!out_mbox) {
+ err = -ENOMEM;
+ goto free_in_mbox;
+ }
+
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+ tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+ mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+ n_retry = 0;
+retry:
+ err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+ if (!err) {
+ err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ }
+
+ if (!err)
+ memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+ reg->len);
+
+ mlxsw_core->emad.tid++;
+ mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+ mlxsw_cmd_mbox_free(in_mbox);
+ return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type)
+{
+ u64 cur_tid;
+ int err;
+
+ if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+ dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+ return -EINTR;
+ }
+
+ cur_tid = mlxsw_core->emad.tid;
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ /* During initialization EMAD interface is not available to us,
+ * so we default to command interface. We switch to EMAD interface
+ * after setting the appropriate traps.
+ */
+ if (!mlxsw_core->emad.use_emad)
+ err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+ payload, type);
+ else
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type);
+
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ mutex_unlock(&mlxsw_core->emad.lock);
+ return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload)
+{
+ return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
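+/* A typical caller packs a register payload with the helpers from reg.h
+ * and hands it to one of the accessors above, as mlxsw_emad_traps_set()
+ * does earlier in this file:
+ *
+ *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *
+ *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ *			    MLXSW_TRAP_ID_ETHEMAD);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ */
+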
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+ const struct mlxsw_rx_listener *rxl;
+ struct mlxsw_core_pcpu_stats *pcpu_stats;
+ u8 local_port = rx_info->sys_port;
+ bool found = false;
+
+ dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+ __func__, rx_info->sys_port, rx_info->trap_id);
+
+ if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+ (local_port >= MLXSW_PORT_MAX_PORTS))
+ goto drop;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+ rxl = &rxl_item->rxl;
+ if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+ rxl->local_port == local_port) &&
+ rxl->trap_id == rx_info->trap_id) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found)
+ goto drop;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->port_rx_packets[local_port]++;
+ pcpu_stats->port_rx_bytes[local_port] += skb->len;
+ pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+ pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ rxl->func(skb, local_port, rxl_item->priv);
+ return;
+
+drop:
+ if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+ if (local_port >= MLXSW_PORT_MAX_PORTS)
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+ else
+ this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size)
+{
+ u8 status;
+ int err;
+
+ BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+ if (!mlxsw_core->bus->cmd_exec)
+ return -EOPNOTSUPP;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+ if (in_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+ }
+
+ err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+ opcode_mod, in_mod, out_mbox_direct,
+ in_mbox, in_mbox_size,
+ out_mbox, out_mbox_size, &status);
+
+ if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod, status, mlxsw_cmd_status_str(status));
+ } else if (err == -ETIMEDOUT) {
+ dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+ opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+ in_mod);
+ }
+
+ if (!err && out_mbox) {
+ dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+ mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+ mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+ if (!mlxsw_core_dbg_root)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+ debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 000000000000..807827350a89
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,212 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind) \
+ MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+ u8 local_port;
+ bool is_emad;
+};
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+ const struct mlxsw_tx_info *tx_info);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+ void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+ u8 local_port;
+ u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+ void (*func)(const struct mlxsw_reg_info *reg,
+ char *payload, void *priv);
+ enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_event_listener *el,
+ void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+ u16 sys_port;
+ int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+ struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+ u8 used_type:1,
+ used_properties:1;
+ u8 type;
+ u8 properties;
+};
+
+struct mlxsw_config_profile {
+ u16 used_max_vepa_channels:1,
+ used_max_lag:1,
+ used_max_port_per_lag:1,
+ used_max_mid:1,
+ used_max_pgt:1,
+ used_max_system_port:1,
+ used_max_vlan_groups:1,
+ used_max_regions:1,
+ used_flood_tables:1,
+ used_flood_mode:1,
+ used_max_ib_mc:1,
+ used_max_pkey:1,
+ used_ar_sec:1,
+ used_adaptive_routing_group_cap:1;
+ u8 max_vepa_channels;
+ u16 max_lag;
+ u16 max_port_per_lag;
+ u16 max_mid;
+ u16 max_pgt;
+ u16 max_system_port;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u8 max_flood_tables;
+ u8 max_vid_flood_tables;
+ u8 flood_mode;
+ u8 max_fid_offset_flood_tables;
+ u16 fid_offset_flood_table_size;
+ u8 max_fid_flood_tables;
+ u16 fid_flood_table_size;
+ u16 max_ib_mc;
+ u16 max_pkey;
+ u8 ar_sec;
+ u16 adaptive_routing_group_cap;
+ u8 arn;
+ struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
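+
+/* Each limit in the profile takes effect only when its used_* bit is
+ * set, so a driver states just the resources it cares about. A sketch
+ * with illustrative values (not recommendations):
+ *
+ *	static struct mlxsw_config_profile my_profile = {
+ *		.used_max_lag		= 1,
+ *		.max_lag		= 64,
+ *		.used_max_port_per_lag	= 1,
+ *		.max_port_per_lag	= 16,
+ *	};
+ */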
+
+struct mlxsw_driver {
+ struct list_head list;
+ const char *kind;
+ struct module *owner;
+ size_t priv_size;
+ int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info);
+ void (*fini)(void *driver_priv);
+ void (*txhdr_construct)(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ u8 txhdr_len;
+ const struct mlxsw_config_profile *profile;
+};
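+
+/* Driver registration sketch (illustrative; all my_* names are
+ * hypothetical):
+ *
+ *	static struct mlxsw_driver my_driver = {
+ *		.kind		= MLXSW_DEVICE_KIND_SWITCHX2,
+ *		.owner		= THIS_MODULE,
+ *		.priv_size	= sizeof(struct my_priv),
+ *		.init		= my_init,
+ *		.fini		= my_fini,
+ *		.txhdr_construct = my_txhdr_construct,
+ *		.txhdr_len	= MY_TXHDR_LEN,
+ *		.profile	= &my_profile,
+ *	};
+ *
+ *	err = mlxsw_core_driver_register(&my_driver);
+ */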
+
+struct mlxsw_bus {
+ const char *kind;
+ int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile);
+ void (*fini)(void *bus_priv);
+ bool (*skb_transmit_busy)(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info);
+ int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+ int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+ const char *device_kind;
+ const char *device_name;
+ struct device *dev;
+ struct {
+ u16 major;
+ u16 minor;
+ u16 subminor;
+ } fw_rev;
+ u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+ u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 000000000000..97b6bb5d9185
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518 /* Length in bytes */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10 /* Length in bytes */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
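+
+/* Putting the definitions above and below together, an EMAD frame is an
+ * Ethernet frame (DMAC/SMAC/EtherType 0x8932 as defined above) carrying
+ * an OP TLV, a REG TLV with the register payload, and a terminating END
+ * TLV. (Summary for orientation, not a normative format description.)
+ */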
+
+/* EMAD TLV Types */
+enum {
+ MLXSW_EMAD_TLV_TYPE_END,
+ MLXSW_EMAD_TLV_TYPE_OP,
+ MLXSW_EMAD_TLV_TYPE_DR,
+ MLXSW_EMAD_TLV_TYPE_REG,
+ MLXSW_EMAD_TLV_TYPE_USERDATA,
+ MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4 /* Length in 32-bit words */
+
+enum {
+ MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+ MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+ MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+ MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+ MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+ MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+ MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+ MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+ MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+ MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+ switch (status) {
+ case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+ return "operation performed";
+ case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+ return "device is busy";
+ case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+ return "version not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+ return "unknown TLV";
+ case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+ return "register not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+ return "class not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+ return "method not supported";
+ case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+ return "bad parameter";
+ case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+ return "resource not available";
+ case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+ return "acknowledged. retransmit";
+ case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+ return "internal error";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+enum {
+ MLXSW_EMAD_OP_TLV_REQUEST,
+ MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+ MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+ MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+ MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+ MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in 32-bit words */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 000000000000..a94dbda6590b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,441 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+ unsigned short offset; /* bytes in container */
+ unsigned short step; /* step in bytes for indexed items */
+ unsigned short in_step_offset; /* offset within one step */
+ unsigned char shift; /* shift in bits */
+ unsigned char element_size; /* size of element in bit array */
+ bool no_real_shift;
+ union {
+ unsigned char bits;
+ unsigned short bytes;
+ } size;
+ const char *name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+ size_t typesize)
+{
+ BUG_ON(index && !item->step);
+ if (item->offset % typesize != 0 ||
+ item->step % typesize != 0 ||
+ item->in_step_offset % typesize != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+ item->name, item->offset, item->step,
+ item->in_step_offset, typesize);
+ BUG();
+ }
+
+ return ((item->offset + item->step * index + item->in_step_offset) /
+ typesize);
+}
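+
+/* Worked example (illustrative numbers): a u32 item with offset 0x10,
+ * step 0x08 and in_step_offset 0x04 resolves index 2 to byte
+ * 0x10 + 2 * 0x08 + 0x04 = 0x24, i.e. __be32 word 0x24 / 4 = 9 of buf.
+ */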
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 tmp;
+
+ tmp = be16_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+ unsigned short index, u16 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u16));
+ __be16 *b = (__be16 *) buf;
+ u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u16 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be16_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 tmp;
+
+ tmp = be32_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+ unsigned short index, u32 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index,
+ sizeof(u32));
+ __be32 *b = (__be32 *) buf;
+ u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+ u32 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be32_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be32(tmp);
+}
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 tmp;
+
+ tmp = be64_to_cpu(b[offset]);
+ tmp >>= item->shift;
+ tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+ if (item->no_real_shift)
+ tmp <<= item->shift;
+ return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+ unsigned short index, u64 val)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+ __be64 *b = (__be64 *) buf;
+ u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+ u64 tmp;
+
+ if (!item->no_real_shift)
+ val <<= item->shift;
+ val &= mask;
+ tmp = be64_to_cpu(b[offset]);
+ tmp &= ~mask;
+ tmp |= val;
+ b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+ struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(dst, &buf[offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
+ struct mlxsw_item *item,
+ unsigned short index)
+{
+ unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+ memcpy(&buf[offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+ u16 max_index, be_index;
+ u16 offset; /* byte offset inside the array */
+ u8 in_byte_index;
+
+ BUG_ON(index && !item->element_size);
+ if (item->offset % sizeof(u32) != 0 ||
+ BITS_PER_BYTE % item->element_size != 0) {
+ pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+ item->name, item->offset, item->element_size);
+ BUG();
+ }
+
+ max_index = (item->size.bytes << 3) / item->element_size - 1;
+ be_index = max_index - index;
+ offset = be_index * item->element_size >> 3;
+ in_byte_index = index % (BITS_PER_BYTE / item->element_size);
+ *shift = in_byte_index * item->element_size;
+
+ return item->offset + offset;
+}
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+ u16 index)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+ tmp = buf[offset];
+ tmp >>= shift;
+ tmp &= GENMASK(item->element_size - 1, 0);
+ return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+ u16 index, u8 val)
+{
+ u8 shift, tmp;
+ u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+ u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+ val <<= shift;
+ val &= mask;
+ tmp = buf[offset];
+ tmp &= ~mask;
+ tmp |= val;
+ buf[offset] = tmp;
+}
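+
+/* Worked example (illustrative): with element_size = 2 a byte holds four
+ * 2-bit elements, and for size.bytes = 4 max_index is 15. Index 0 then
+ * maps to the last byte (elements are stored in big-endian order) with
+ * shift 0, index 1 to the same byte with shift 2, and so on.
+ */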
+
+#define __ITEM_NAME(_type, _cname, _iname) \
+ mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
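+
+/* Usage sketch (the container and field names are hypothetical):
+ *
+ *	MLXSW_ITEM16(reg, foo, bar, 0x02, 4, 8);
+ *
+ * emits mlxsw_reg_foo_bar_get(buf) and mlxsw_reg_foo_bar_set(buf, val)
+ * for an 8-bit field starting at bit 4 of the __be16 word at byte 0x02.
+ */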
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u16 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u16 val) \
+{ \
+ __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+ _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u32 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .shift = _shift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, \
+ _sizebits, _step, _instepoffset, _norealshift) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .shift = _shift, \
+ .no_real_shift = _norealshift, \
+ .size = {.bits = _sizebits,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u64 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \
+{ \
+ return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
+ u64 val) \
+{ \
+ __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), 0); \
+}
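+
+/* Usage sketch (names and sizes are hypothetical):
+ *
+ *	MLXSW_ITEM_BUF(cmd_mbox, foo, blob, 0x10, 16);
+ *
+ * emits mlxsw_cmd_mbox_foo_blob_memcpy_from()/_memcpy_to() copying the
+ * 16 bytes at offset 0x10 of the mailbox buffer.
+ */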
+
+#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \
+ _step, _instepoffset) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .step = _step, \
+ .in_step_offset = _instepoffset, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \
+ unsigned short index, \
+ char *dst) \
+{ \
+ __mlxsw_item_memcpy_from(buf, dst, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
+ unsigned short index, \
+ const char *src) \
+{ \
+ __mlxsw_item_memcpy_to(buf, src, \
+ &__ITEM_NAME(_type, _cname, _iname), index); \
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
+ _element_size) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
+ .offset = _offset, \
+ .element_size = _element_size, \
+ .size = {.bytes = _sizebytes,}, \
+ .name = #_type "_" #_cname "_" #_iname, \
+}; \
+static inline u8 \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \
+{ \
+ return __mlxsw_item_bit_array_get(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index); \
+} \
+static inline void \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
+{ \
+ __mlxsw_item_bit_array_set(buf, \
+ &__ITEM_NAME(_type, _cname, _iname), \
+ index, val); \
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 000000000000..de69e719dc9d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,1847 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
+ {0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+ switch (id->device) {
+ case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+ return MLXSW_DEVICE_KIND_SWITCHX2;
+ case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
+ return MLXSW_DEVICE_KIND_SPECTRUM;
+ default:
+ BUG();
+ }
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+ ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+ MLXSW_PCI_QUEUE_TYPE_SDQ,
+ MLXSW_PCI_QUEUE_TYPE_RDQ,
+ MLXSW_PCI_QUEUE_TYPE_CQ,
+ MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+ switch (q_type) {
+ case MLXSW_PCI_QUEUE_TYPE_SDQ:
+ return "sdq";
+ case MLXSW_PCI_QUEUE_TYPE_RDQ:
+ return "rdq";
+ case MLXSW_PCI_QUEUE_TYPE_CQ:
+ return "cq";
+ case MLXSW_PCI_QUEUE_TYPE_EQ:
+ return "eq";
+ }
+ BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+ MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+ MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+ MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+ 0, /* unused */
+ 0, /* unused */
+ MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+ MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+ char *buf;
+ dma_addr_t mapaddr;
+ size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+ char *elem; /* pointer to the DMA-mapped element memory chunk */
+ union {
+ struct {
+ struct sk_buff *skb;
+ } sdq;
+ struct {
+ struct sk_buff *skb;
+ } rdq;
+ } u;
+};
+
+struct mlxsw_pci_queue {
+ spinlock_t lock; /* for queue accesses */
+ struct mlxsw_pci_mem_item mem_item;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ u16 producer_counter;
+ u16 consumer_counter;
+ u16 count; /* number of elements in queue */
+ u8 num; /* queue number */
+ u8 elem_size; /* size of one element */
+ enum mlxsw_pci_queue_type type;
+ struct tasklet_struct tasklet; /* queue processing tasklet */
+ struct mlxsw_pci *pci;
+ union {
+ struct {
+ u32 comp_sdq_count;
+ u32 comp_rdq_count;
+ } cq;
+ struct {
+ u32 ev_cmd_count;
+ u32 ev_comp_count;
+ u32 ev_other_count;
+ } eq;
+ } u;
+};
+
+struct mlxsw_pci_queue_type_group {
+ struct mlxsw_pci_queue *q;
+ u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+ u32 doorbell_offset;
+ struct msix_entry msix_entry;
+ struct mlxsw_core *core;
+ struct {
+ struct mlxsw_pci_mem_item *items;
+ unsigned int count;
+ } fw_area;
+ struct {
+ struct mlxsw_pci_mem_item out_mbox;
+ struct mlxsw_pci_mem_item in_mbox;
+ struct mutex lock; /* Lock access to command registers */
+ bool nopoll;
+ wait_queue_head_t wait;
+ bool wait_done;
+ struct {
+ u8 status;
+ u64 out_param;
+ } comp;
+ } cmd;
+ struct mlxsw_bus_info bus_info;
+ struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+ tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+ size_t elem_size, int elem_index)
+{
+ return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->producer_counter & (q->count - 1);
+
+ if ((q->producer_counter - q->consumer_counter) == q->count)
+ return NULL;
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+ int index = q->consumer_counter & (q->count - 1);
+
+ return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+ return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+ return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+ u32 (*get_elem_owner_func)(char *))
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *elem;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ elem = elem_info->elem;
+ owner_bit = get_elem_owner_func(elem);
+ if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+ return NULL;
+ q->consumer_counter++;
+ rmb(); /* make sure we read owned bit before the rest of elem */
+ return elem;
+}
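+
+/* Ownership works by phase: since q->count is a power of two,
+ * (consumer_counter & count) flips on every ring wrap-around, and an
+ * element whose owner bit disagrees with that phase bit is still owned
+ * by the hardware, so NULL is returned until it completes.
+ */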
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+ return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+ return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+ enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+ return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci,
+ MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+ u8 q_num)
+{
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_type_offset[q->type],
+ q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 val)
+{
+ mlxsw_pci_write32(mlxsw_pci,
+ DOORBELL(mlxsw_pci->doorbell_offset,
+ mlxsw_pci_doorbell_arm_type_offset[q->type],
+ q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+ q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ wmb(); /* ensure all writes are done before we ring a bell */
+ __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
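+
+/* Doorbell summary (as read from the helpers above): producer doorbells
+ * post the producer counter for descriptor queues, consumer doorbells
+ * post consumer_counter + count for CQs/EQs, and the arm doorbells
+ * re-enable event reporting for a CQ/EQ.
+ */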
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+ int page_index)
+{
+ return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Use the CQ with the same number as this SDQ. */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, char *frag_data, size_t frag_len,
+ int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ dma_addr_t mapaddr;
+
+ mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+ if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+ return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+ int index, int direction)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+ dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+ if (!frag_len)
+ return;
+ pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ size_t buf_len = MLXSW_PORT_MAX_MTU;
+ char *wqe = elem_info->elem;
+ struct sk_buff *skb;
+ int err;
+
+ elem_info->u.rdq.skb = NULL;
+ skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Assume that wqe was previously zeroed. */
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ buf_len, DMA_FROM_DEVICE);
+ if (err)
+ goto err_frag_map;
+
+ elem_info->u.rdq.skb = skb;
+ return 0;
+
+err_frag_map:
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue_elem_info *elem_info)
+{
+ struct sk_buff *skb;
+ char *wqe;
+
+ skb = elem_info->u.rdq.skb;
+ wqe = elem_info->elem;
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
+ int i;
+ int err;
+
+ q->producer_counter = 0;
+ q->consumer_counter = 0;
+
+ /* Use the CQ whose number is this RDQ's number plus the SDQ
+ * count, since the lower-numbered CQs are assigned to SDQs.
+ */
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+ }
+
+ err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ BUG_ON(!elem_info);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err)
+ goto rollback;
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ }
+
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+ return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ int i;
+
+ mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+ for (i = 0; i < q->count; i++) {
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ }
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %5d\n",
+ i, q->producer_counter, q->consumer_counter,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_cqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.cq.comp_sdq_count,
+ q->u.cq.comp_rdq_count, q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.sdq.skb;
+ wqe = elem_info->elem;
+ for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ elem_info->u.sdq.skb = NULL;
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+ spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q,
+ u16 consumer_counter_limit,
+ char *cqe)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ struct sk_buff *skb;
+ struct mlxsw_rx_info rx_info;
+ u16 byte_count;
+ int err;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ skb = elem_info->u.rdq.skb;
+ if (!skb)
+ return;
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+ if (q->consumer_counter++ != consumer_counter_limit)
+ dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+ /* LAG is not supported yet; drop such packets */
+ if (mlxsw_pci_cqe_lag_get(cqe))
+ goto drop;
+
+ rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+ rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe))
+ byte_count -= ETH_FCS_LEN;
+ skb_put(skb, byte_count);
+ mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+ memset(wqe, 0, q->elem_size);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err)
+ dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ /* Everything is set up, ring doorbell to pass elem to HW */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+ return;
+
+drop:
+ dev_kfree_skb_any(skb);
+ goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ char *cqe;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+ if (sendq) {
+ struct mlxsw_pci_queue *sdq;
+
+ sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_sdq_count++;
+ } else {
+ struct mlxsw_pci_queue *rdq;
+
+ rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ wqe_counter, cqe);
+ q->u.cq.comp_rdq_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ }
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+ struct mlxsw_pci_queue *q;
+ int i;
+ static const char hdr[] =
+ "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
+
+ seq_puts(file, hdr);
+ for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ spin_lock_bh(&q->lock);
+ seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+ i, q->consumer_counter, q->u.eq.ev_cmd_count,
+ q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+ q->count);
+ spin_unlock_bh(&q->lock);
+ }
+ return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+ mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+ mlxsw_pci->cmd.comp.out_param =
+ ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+ mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+ mlxsw_pci->cmd.wait_done = true;
+ wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+ return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+ struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
+ unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
+ char *eqe;
+ u8 cqn;
+ bool cq_handle = false;
+ int items = 0;
+ int credits = q->count >> 1;
+
+ memset(&active_cqns, 0, sizeof(active_cqns));
+
+ while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+ u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+ switch (event_type) {
+ case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+ q->u.eq.ev_cmd_count++;
+ break;
+ case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
+ cq_handle = true;
+ q->u.eq.ev_comp_count++;
+ break;
+ default:
+ q->u.eq.ev_other_count++;
+ }
+ if (++items == credits)
+ break;
+ }
+ if (items) {
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ }
+
+ if (!cq_handle)
+ return;
+ for_each_set_bit(cqn, active_cqns, cq_count) {
+ q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+}
+
+struct mlxsw_pci_queue_ops {
+ const char *name;
+ enum mlxsw_pci_queue_type type;
+ int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q);
+ void (*fini)(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q);
+ void (*tasklet)(unsigned long data);
+ int (*dbg_read)(struct seq_file *s, void *data);
+ u16 elem_count;
+ u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_SDQ,
+ .init = mlxsw_pci_sdq_init,
+ .fini = mlxsw_pci_sdq_fini,
+ .dbg_read = mlxsw_pci_sdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_RDQ,
+ .init = mlxsw_pci_rdq_init,
+ .fini = mlxsw_pci_rdq_fini,
+ .dbg_read = mlxsw_pci_rdq_dbg_read,
+ .elem_count = MLXSW_PCI_WQE_COUNT,
+ .elem_size = MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_CQ,
+ .init = mlxsw_pci_cq_init,
+ .fini = mlxsw_pci_cq_fini,
+ .tasklet = mlxsw_pci_cq_tasklet,
+ .dbg_read = mlxsw_pci_cq_dbg_read,
+ .elem_count = MLXSW_PCI_CQE_COUNT,
+ .elem_size = MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+ .type = MLXSW_PCI_QUEUE_TYPE_EQ,
+ .init = mlxsw_pci_eq_init,
+ .fini = mlxsw_pci_eq_fini,
+ .tasklet = mlxsw_pci_eq_tasklet,
+ .dbg_read = mlxsw_pci_eq_dbg_read,
+ .elem_count = MLXSW_PCI_EQE_COUNT,
+ .elem_size = MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q, u8 q_num)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+ int i;
+ int err;
+
+ spin_lock_init(&q->lock);
+ q->num = q_num;
+ q->count = q_ops->elem_count;
+ q->elem_size = q_ops->elem_size;
+ q->type = q_ops->type;
+ q->pci = mlxsw_pci;
+
+ if (q_ops->tasklet)
+ tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+ mem_item->size = MLXSW_PCI_AQ_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf)
+ return -ENOMEM;
+ memset(mem_item->buf, 0, mem_item->size);
+
+ q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+ if (!q->elem_info) {
+ err = -ENOMEM;
+ goto err_elem_info_alloc;
+ }
+
+ /* Initialize the per-element info array so the DMA-mapped
+ * elements can be looked up easily later.
+ */
+ for (i = 0; i < q->count; i++) {
+ struct mlxsw_pci_queue_elem_info *elem_info;
+
+ elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+ elem_info->elem =
+ __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+ }
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = q_ops->init(mlxsw_pci, mbox, q);
+ if (err)
+ goto err_q_ops_init;
+ return 0;
+
+err_q_ops_init:
+ kfree(q->elem_info);
+err_elem_info_alloc:
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ struct mlxsw_pci_queue *q)
+{
+ struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+ q_ops->fini(mlxsw_pci, q);
+ kfree(q->elem_info);
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_pci_queue_ops *q_ops,
+ u8 num_qs)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct mlxsw_pci_queue_type_group *queue_group;
+ char tmp[16];
+ int i;
+ int err;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+ if (!queue_group->q)
+ return -ENOMEM;
+
+ for (i = 0; i < num_qs; i++) {
+ err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+ &queue_group->q[i], i);
+ if (err)
+ goto err_queue_init;
+ }
+ queue_group->count = num_qs;
+
+ sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+ debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+ q_ops->dbg_read);
+
+ return 0;
+
+err_queue_init:
+ for (i--; i >= 0; i--)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+ return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue_ops *q_ops)
+{
+ struct mlxsw_pci_queue_type_group *queue_group;
+ int i;
+
+ queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+ for (i = 0; i < queue_group->count; i++)
+ mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+ kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ u8 num_sdqs;
+ u8 sdq_log2sz;
+ u8 num_rdqs;
+ u8 rdq_log2sz;
+ u8 num_cqs;
+ u8 cq_log2sz;
+ u8 num_eqs;
+ u8 eq_log2sz;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+
+ num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+ sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+ num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+ rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+ num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+ cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+ num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+ eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+ if (num_sdqs + num_rdqs > num_cqs ||
+ num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
+ dev_err(&pdev->dev, "Unsupported number of queues\n");
+ return -EINVAL;
+ }
+
+ if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+ (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+ (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+ dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+ num_eqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize event queues\n");
+ return err;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+ num_cqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+ goto err_cqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+ num_sdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+ goto err_sdqs_init;
+ }
+
+ err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+ num_rdqs);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+ goto err_rdqs_init;
+ }
+
+ /* Until now the command interface had to be polled; with the event
+ * queues up, command completions arrive via the EQ, so polling can
+ * stop.
+ */
+ mlxsw_pci->cmd.nopoll = true;
+ return 0;
+
+err_rdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+ return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci->cmd.nopoll = false;
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+ mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+ char *mbox, int index,
+ const struct mlxsw_swid_config *swid)
+{
+ u8 mask = 0;
+
+ if (swid->used_type) {
+ mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+ mbox, index, swid->type);
+ mask |= 1;
+ }
+ if (swid->used_properties) {
+ mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+ mbox, index, swid->properties);
+ mask |= 2;
+ }
+ mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ const struct mlxsw_config_profile *profile)
+{
+ int i;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ if (profile->used_max_vepa_channels) {
+ mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+ mbox, profile->max_vepa_channels);
+ }
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(
+ mbox, profile->max_lag);
+ }
+ if (profile->used_max_port_per_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+ mbox, profile->max_port_per_lag);
+ }
+ if (profile->used_max_mid) {
+ mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_mid_set(
+ mbox, profile->max_mid);
+ }
+ if (profile->used_max_pgt) {
+ mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pgt_set(
+ mbox, profile->max_pgt);
+ }
+ if (profile->used_max_system_port) {
+ mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_system_port_set(
+ mbox, profile->max_system_port);
+ }
+ if (profile->used_max_vlan_groups) {
+ mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+ mbox, profile->max_vlan_groups);
+ }
+ if (profile->used_max_regions) {
+ mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_regions_set(
+ mbox, profile->max_regions);
+ }
+ if (profile->used_flood_tables) {
+ mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+ mbox, profile->max_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+ mbox, profile->max_vid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
+ mbox, profile->max_fid_offset_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
+ mbox, profile->fid_offset_flood_table_size);
+ mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
+ mbox, profile->max_fid_flood_tables);
+ mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
+ mbox, profile->fid_flood_table_size);
+ }
+ if (profile->used_flood_mode) {
+ mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_flood_mode_set(
+ mbox, profile->flood_mode);
+ }
+ if (profile->used_max_ib_mc) {
+ mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+ mbox, profile->max_ib_mc);
+ }
+ if (profile->used_max_pkey) {
+ mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_pkey_set(
+ mbox, profile->max_pkey);
+ }
+ if (profile->used_ar_sec) {
+ mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ar_sec_set(
+ mbox, profile->ar_sec);
+ }
+ if (profile->used_adaptive_routing_group_cap) {
+ mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+ mbox, profile->adaptive_routing_group_cap);
+ }
+
+ for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+ mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+ &profile->swid_config[i]);
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+ struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+ int err;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+ if (err)
+ return err;
+ mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+ mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+ return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ u16 num_pages)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int nent = 0;
+ int i;
+ int err;
+
+ mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+ GFP_KERNEL);
+ if (!mlxsw_pci->fw_area.items)
+ return -ENOMEM;
+ mlxsw_pci->fw_area.count = num_pages;
+
+ mlxsw_cmd_mbox_zero(mbox);
+ for (i = 0; i < num_pages; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ mem_item->size = MLXSW_PCI_PAGE_SIZE;
+ mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+ mem_item->size,
+ &mem_item->mapaddr);
+ if (!mem_item->buf) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
+ mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
+ if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ nent = 0;
+ mlxsw_cmd_mbox_zero(mbox);
+ }
+ }
+
+ if (nent) {
+ err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+ if (err)
+ goto err_cmd_map_fa;
+ }
+
+ return 0;
+
+err_cmd_map_fa:
+err_alloc:
+ for (i--; i >= 0; i--) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+ return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ struct mlxsw_pci_mem_item *mem_item;
+ int i;
+
+ mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+ for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
+ mem_item = &mlxsw_pci->fw_area.items[i];
+
+ pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
+ }
+ kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+ struct mlxsw_pci *mlxsw_pci = dev_id;
+ struct mlxsw_pci_queue *q;
+ int i;
+
+ for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+ q = mlxsw_pci_eq_get(mlxsw_pci, i);
+ mlxsw_pci_queue_tasklet_schedule(q);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ int err = 0;
+
+ mbox->size = MLXSW_CMD_MBOX_SIZE;
+ mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr);
+ if (!mbox->buf) {
+ dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_mem_item *mbox)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+
+ pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_config_profile *profile)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char *mbox;
+ u16 num_pages;
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ mlxsw_pci->core = mlxsw_core;
+
+ mbox = mlxsw_cmd_mbox_alloc();
+ if (!mbox)
+ return -ENOMEM;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ if (err)
+ goto mbox_put;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ if (err)
+ goto err_out_mbox_alloc;
+
+ err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ if (err)
+ goto err_query_fw;
+
+ mlxsw_pci->bus_info.fw_rev.major =
+ mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.minor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+ mlxsw_pci->bus_info.fw_rev.subminor =
+ mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+ dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+ err = -EINVAL;
+ goto err_iface_rev;
+ }
+ if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+ err = -EINVAL;
+ goto err_doorbell_page_bar;
+ }
+
+ mlxsw_pci->doorbell_offset =
+ mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+ num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+ err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+ if (err)
+ goto err_fw_area_init;
+
+ err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+ if (err)
+ goto err_boardinfo;
+
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ if (err)
+ goto err_config_profile;
+
+ err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+ if (err)
+ goto err_aqs_init;
+
+ err = request_irq(mlxsw_pci->msix_entry.vector,
+ mlxsw_pci_eq_irq_handler, 0,
+ mlxsw_pci_driver_name, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "IRQ request failed\n");
+ goto err_request_eq_irq;
+ }
+
+ goto mbox_put;
+
+err_request_eq_irq:
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+err_out_mbox_alloc:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+mbox_put:
+ mlxsw_cmd_mbox_free(mbox);
+ return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+ mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_fw_area_fini(mlxsw_pci);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_tx_info *tx_info)
+{
+ u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+ return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
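+
+/* E.g., with three SDQs, traffic for local port 5 is sent via
+ * SDQ 5 % 3 == 2, spreading local ports across the send queues.
+ */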
+
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+ return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ struct mlxsw_pci_queue *q;
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ char *wqe;
+ int i;
+ int err;
+
+ if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ }
+
+ q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+ spin_lock_bh(&q->lock);
+ elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+ if (!elem_info) {
+ /* queue is full */
+ err = -EAGAIN;
+ goto unlock;
+ }
+ elem_info->u.sdq.skb = skb;
+
+ wqe = elem_info->elem;
+ mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+ mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+ mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (err)
+ goto unmap_frags;
+ }
+
+ /* Set unused sq entries byte count to zero. */
+ for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+ mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+ /* Everything is set up, ring producer doorbell to get HW going */
+ q->producer_counter++;
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+ goto unlock;
+
+unmap_frags:
+ for (; i >= 0; i--)
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+ spin_unlock_bh(&q->lock);
+ return err;
+}
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+ u32 in_mod, bool out_mbox_direct,
+ char *in_mbox, size_t in_mbox_size,
+ char *out_mbox, size_t out_mbox_size,
+ u8 *p_status)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+ dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+ dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+ bool evreq = mlxsw_pci->cmd.nopoll;
+ unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+ bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ int err;
+
+ *p_status = MLXSW_CMD_STATUS_OK;
+
+ err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+ if (err)
+ return err;
+
+ if (in_mbox)
+ memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
+ mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
+
+ mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+ mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+ *p_wait_done = false;
+
+ wmb(); /* all needs to be written before we write control register */
+ mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+ MLXSW_PCI_CIR_CTRL_GO_BIT |
+ (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+ (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+ opcode);
+
+ if (!evreq) {
+ unsigned long end;
+
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ *p_wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
+ } else {
+ wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+ *p_status = mlxsw_pci->cmd.comp.status;
+ }
+
+ err = 0;
+ if (*p_wait_done) {
+ if (*p_status)
+ err = -EIO;
+ } else {
+ err = -ETIMEDOUT;
+ }
+
+ if (!err && out_mbox && out_mbox_direct) {
+ /* Some commands don't use output param as address to mailbox
+ * but they store output directly into registers. In that case,
+ * copy registers into mbox buffer.
+ */
+ __be32 tmp;
+
+ if (!evreq) {
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+ }
+ } else if (!err && out_mbox) {
+ memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+ }
+
+ mutex_unlock(&mlxsw_pci->cmd.lock);
+
+ return err;
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+ .kind = "pci",
+ .init = mlxsw_pci_init,
+ .fini = mlxsw_pci_fini,
+ .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
+ .skb_transmit = mlxsw_pci_skb_transmit,
+ .cmd_exec = mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ /* Current firmware does not let us know when the reset is done.
+ * So we just wait here for a constant time and hope for the best.
+ */
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlxsw_pci *mlxsw_pci;
+ int err;
+
+ mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+ if (!mlxsw_pci)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!mlxsw_pci->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ mlxsw_pci->pdev = pdev;
+ pci_set_drvdata(pdev, mlxsw_pci);
+
+ err = mlxsw_pci_sw_reset(mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "Software reset failed\n");
+ goto err_sw_reset;
+ }
+
+ err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_msix_init;
+ }
+
+ mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+ mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+ mlxsw_pci->bus_info.dev = &pdev->dev;
+
+ mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+ mlxsw_pci_dbg_root);
+ if (!mlxsw_pci->dbg_dir) {
+ dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+ err = -ENOMEM;
+ goto err_dbg_create_dir;
+ }
+
+ err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+ &mlxsw_pci_bus, mlxsw_pci);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register bus device\n");
+ goto err_bus_device_register;
+ }
+
+ return 0;
+
+err_bus_device_register:
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+ pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+ iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(mlxsw_pci);
+ return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+ debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+ pci_disable_msix(mlxsw_pci->pdev);
+ iounmap(mlxsw_pci->hw_addr);
+ pci_release_regions(mlxsw_pci->pdev);
+ pci_disable_device(mlxsw_pci->pdev);
+ kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+ .name = mlxsw_pci_driver_name,
+ .id_table = mlxsw_pci_id_table,
+ .probe = mlxsw_pci_probe,
+ .remove = mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+ int err;
+
+ mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+ if (!mlxsw_pci_dbg_root)
+ return -ENOMEM;
+ err = pci_register_driver(&mlxsw_pci_driver);
+ if (err)
+ goto err_register_driver;
+ return 0;
+
+err_register_driver:
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+ return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+ pci_unregister_driver(&mlxsw_pci_driver);
+ debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 000000000000..142f33d978c5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,226 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
+#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE 4096
+
+#define MLXSW_PCI_CIR_BASE 0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
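+
+/* As encoded by mlxsw_pci_cmd_exec(): the completion status lives in
+ * bits [31:24] of the control register, the go bit in bit 23, the
+ * event-request bit in bit 22, the opcode modifier starts at bit 12
+ * and the opcode occupies the low bits.
+ */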
+
+#define MLXSW_PCI_SW_RESET 0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
+ ((offset) + (type_offset) + (num) * 4)
+
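+/* For example, the producer doorbell of SDQ number 3 lives at
+ * MLXSW_PCI_DOORBELL(doorbell_offset, MLXSW_PCI_DOORBELL_SDQ_OFFSET, 3),
+ * i.e. doorbell_offset + 0x000 + 12, where doorbell_offset is the page
+ * offset queried from firmware at init time.
+ */
+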
+#define MLXSW_PCI_CQS_MAX 96
+#define MLXSW_PCI_EQS_COUNT 2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM 1
+
+#define MLXSW_PCI_AQ_PAGES 8
+#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
+
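+/* With the sizes above, every queue spans 8 pages (32 KB), giving
+ * 32768 / 32 == 1024 WQEs per descriptor queue and 32768 / 16 == 2048
+ * elements per completion/event queue.
+ */
+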
+#define MLXSW_PCI_WQE_SG_ENTRIES 3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA
+
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Locally processed packets must not exceed 2K in size (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
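+
+/* A sketch of decoding this field from a received CQE, using the
+ * getters generated by the MLXSW_ITEM32() definitions above:
+ *
+ *   u16 v = mlxsw_pci_cqe_system_port_get(cqe);
+ *   if (mlxsw_pci_cqe_lag_get(cqe)) {
+ *           u16 lag_id = v >> 4;    bits [15:4]
+ *           u8 sub_port = v & 0xf;  bits [3:0]
+ *   }
+ */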
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates that the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP 0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD 0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 000000000000..726f5435b32f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU 10000
+
+#define MLXSW_PORT_DEFAULT_VID 1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT 255
+#define MLXSW_PORT_SWID_ALL_SWIDS 254
+#define MLXSW_PORT_SWID_TYPE_ETH 2
+
+#define MLXSW_PORT_MID 0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS 0x40
+#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET 10
+#define MLXSW_PORT_PHY_BITS_OFFSET 4
+#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT 0x0
+
+#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+ MLXSW_PORT_ADMIN_STATUS_UP = 1,
+ MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+ MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+ MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+ MLXSW_PORT_OPER_STATUS_UP = 1,
+ MLXSW_PORT_OPER_STATUS_DOWN = 2,
+ MLXSW_PORT_OPER_STATUS_FAILURE = 4, /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 000000000000..236fb5d2ad69
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,2460 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+ u16 id;
+ u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+ .id = MLXSW_REG_SGCR_ID,
+ .len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+ MLXSW_REG_ZERO(sgcr, payload);
+ mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
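+
+/* A minimal usage sketch (assuming the mlxsw core's register access
+ * helper, mlxsw_reg_write(), from the rest of this patch set):
+ *
+ *   char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ *
+ *   mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ *   mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
+ */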
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+ .id = MLXSW_REG_SPAD_ID,
+ .len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sspr = {
+ .id = MLXSW_REG_SSPR_ID,
+ .len = MLXSW_REG_SSPR_LEN,
+};
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). That number will be reported as the source system
+ * port when packets are forwarded to the CPU. Only one master port is allowed
+ * per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+
+/* reg_sspr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(sspr, payload);
+ mlxsw_reg_sspr_m_set(payload, 1);
+ mlxsw_reg_sspr_local_port_set(payload, local_port);
+ mlxsw_reg_sspr_sub_port_set(payload, 0);
+ mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
+/* SFDAT - Switch Filtering Database Aging Time
+ * --------------------------------------------
+ * Controls the Switch aging time. Aging time is able to be set per Switch
+ * Partition.
+ */
+#define MLXSW_REG_SFDAT_ID 0x2009
+#define MLXSW_REG_SFDAT_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
+ .id = MLXSW_REG_SFDAT_ID,
+ .len = MLXSW_REG_SFDAT_LEN,
+};
+
+/* reg_sfdat_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
+
+/* reg_sfdat_age_time
+ * Aging time in seconds
+ * Min - 10 seconds
+ * Max - 1,000,000 seconds
+ * Default is 300 seconds.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
+
+static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
+{
+ MLXSW_REG_ZERO(sfdat, payload);
+ mlxsw_reg_sfdat_swid_set(payload, 0);
+ mlxsw_reg_sfdat_age_time_set(payload, age_time);
+}
+
+/* SFD - Switch Filtering Database
+ * -------------------------------
+ * The following register defines the access to the filtering database.
+ * The register supports querying, adding, removing and modifying the database.
+ * The access is optimized for bulk updates in which case more than one
+ * FDB record is present in the same command.
+ */
+#define MLXSW_REG_SFD_ID 0x200A
+#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFD_REC_MAX_COUNT 64
+#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \
+ MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfd = {
+ .id = MLXSW_REG_SFD_ID,
+ .len = MLXSW_REG_SFD_LEN,
+};
+
+/* reg_sfd_swid
+ * Switch partition ID for queries. Reserved on Write.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfd_op {
+ /* Dump entire FDB (processed according to record_locator) */
+ MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
+ /* Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
+ /* Query and clear activity. Query records by {MAC, VID/FID} value */
+ MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
+ /* Test. Response indicates if each of the records could be
+ * added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_TEST = 0,
+ /* Add/modify. Aged-out records cannot be added. This command removes
+ * the learning notification of the {MAC, VID/FID}. Response includes
+ * the entries that were added to the FDB.
+ */
+ MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
+ /* Remove record by {MAC, VID/FID}. This command also removes
+ * the learning notification and aged-out notifications
+ * of the {MAC, VID/FID}. The response provides current (pre-removal)
+ * entries as non-aged-out.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
+ /* Remove learned notification by {MAC, VID/FID}. The response provides
+ * the removed learning notification.
+ */
+ MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
+};
+
+/* reg_sfd_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
+
+/* reg_sfd_record_locator
+ * Used for querying the FDB. Use record_locator=0 to initiate the
+ * query. When a record is returned, a new record_locator is
+ * returned to be used in the subsequent query.
+ * Reserved for database update.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
+
+/* reg_sfd_num_rec
+ * Request: Number of records to read/add/modify/remove
+ * Response: Number of records read/added/replaced/removed
+ * See above description for more details.
+ * Ranges 0..64
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
+
+static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
+ u32 record_locator)
+{
+ MLXSW_REG_ZERO(sfd, payload);
+ mlxsw_reg_sfd_op_set(payload, op);
+ mlxsw_reg_sfd_record_locator_set(payload, record_locator);
+}
+
+/* reg_sfd_rec_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_type {
+ MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
+};
+
+/* reg_sfd_rec_type
+ * FDB record type.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_policy {
+ /* Replacement disabled, aging disabled. */
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
+ /* (mlag remote): Replacement enabled, aging disabled,
+ * learning notification enabled on this port.
+ */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
+ /* (ingress device): Replacement enabled, aging enabled. */
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
+};
+
+/* reg_sfd_rec_policy
+ * Policy.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_a
+ * Activity. Set for new static entries. Set for static entries if a frame SMAC
+ * lookup hits on the entry.
+ * To clear the a bit, use "query and clear activity" op.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_mac
+ * MAC address.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
+ MLXSW_REG_SFD_REC_LEN, 0x02);
+
+enum mlxsw_reg_sfd_rec_action {
+ /* forward */
+ MLXSW_REG_SFD_REC_ACTION_NOP = 0,
+ /* forward and trap, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
+ /* trap and do not forward, trap_id is FDB_TRAP */
+ MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+ MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
+};
+
+/* reg_sfd_rec_action
+ * Action to apply on the packet.
+ * Note: Dynamic entries can only be configured with NOP action.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_sub_port
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_fid_vid
+ * Filtering ID or VLAN ID
+ * For SwitchX and SwitchX-2:
+ * - Dynamic entries (policy 2,3) use FID
+ * - Static entries (policy 0) use VID
+ * - When independent learning is configured, VID=FID
+ * For Spectrum: use FID for both Dynamic and Static entries.
+ * VID should not be used.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 vid,
+ enum mlxsw_reg_sfd_rec_action action,
+ u8 local_port)
+{
+ u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
+ mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_rec_type_set(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
+ mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
+ mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid);
+ mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
+ mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
+}
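+
+/* For instance, adding a single static unicast entry for address 'mac'
+ * on VID 1, forwarded out of local port 5 (a sketch):
+ *
+ *   char sfd_pl[MLXSW_REG_SFD_LEN];
+ *
+ *   mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
+ *   mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
+ *                         mac, 1, MLXSW_REG_SFD_REC_ACTION_NOP, 5);
+ */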
+
+static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u8 *p_local_port)
+{
+ mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
+ *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
+}
+
+/* SFN - Switch FDB Notification Register
+ * -------------------------------------------
+ * The switch provides notifications on newly learned FDB entries and
+ * aged out entries. The notifications can be polled by software.
+ */
+#define MLXSW_REG_SFN_ID 0x200B
+#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFN_REC_MAX_COUNT 64
+#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \
+ MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfn = {
+ .id = MLXSW_REG_SFN_ID,
+ .len = MLXSW_REG_SFN_LEN,
+};
+
+/* reg_sfn_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+
+/* reg_sfn_num_rec
+ * Request: Number of learned notifications and aged-out notification
+ * records requested.
+ * Response: Number of notification records returned (must be smaller
+ * than or equal to the value requested)
+ * Ranges 0..64
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
+
+static inline void mlxsw_reg_sfn_pack(char *payload)
+{
+ MLXSW_REG_ZERO(sfn, payload);
+ mlxsw_reg_sfn_swid_set(payload, 0);
+ mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
+}
+
+/* reg_sfn_rec_swid
+ * Switch partition ID.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfn_rec_type {
+ /* MAC addresses learned on a regular port. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
+ /* Aged-out MAC address on a regular port */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
+};
+
+/* reg_sfn_rec_type
+ * Notification record type.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+/* reg_sfn_rec_mac
+ * MAC address.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
+ MLXSW_REG_SFN_REC_LEN, 0x02);
+
+/* reg_sfn_mac_sub_port
+ * VEPA channel on the local port.
+ * 0 if multichannel VEPA is not enabled.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_fid
+ * Filtering identifier.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+ MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
+ char *mac, u16 *p_vid,
+ u8 *p_local_port)
+{
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200D
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+ .id = MLXSW_REG_SPMS_ID,
+ .len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+ MLXSW_REG_SPMS_STATE_NO_CHANGE,
+ MLXSW_REG_SPMS_STATE_DISCARDING,
+ MLXSW_REG_SPMS_STATE_LEARNING,
+ MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(spms, payload);
+ mlxsw_reg_spms_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
+ enum mlxsw_reg_spms_state state)
+{
+ mlxsw_reg_spms_state_set(payload, vid, state);
+}
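+
+/* E.g., to put VID 1 of local port 2 into forwarding state (a sketch;
+ * the payload is then written through the register access interface):
+ *
+ *   char spms_pl[MLXSW_REG_SPMS_LEN];
+ *
+ *   mlxsw_reg_spms_pack(spms_pl, 2);
+ *   mlxsw_reg_spms_vid_pack(spms_pl, 1, MLXSW_REG_SPMS_STATE_FORWARDING);
+ */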
+
+/* SPVID - Switch Port VID
+ * -----------------------
+ * The switch port VID configures the default VID for a port.
+ */
+#define MLXSW_REG_SPVID_ID 0x200E
+#define MLXSW_REG_SPVID_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_spvid = {
+ .id = MLXSW_REG_SPVID_ID,
+ .len = MLXSW_REG_SPVID_LEN,
+};
+
+/* reg_spvid_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+
+/* reg_spvid_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+
+/* reg_spvid_pvid
+ * Port default VID
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
+
+static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+{
+ MLXSW_REG_ZERO(spvid, payload);
+ mlxsw_reg_spvid_local_port_set(payload, local_port);
+ mlxsw_reg_spvid_pvid_set(payload, pvid);
+}
+
+/* SPVM - Switch Port VLAN Membership
+ * ----------------------------------
+ * The Switch Port VLAN Membership register configures the VLAN membership
+ * of a port in a VLAN denoted by VID. VLAN membership is managed per
+ * virtual port. The register can be used to add and remove VID(s) from a port.
+ */
+#define MLXSW_REG_SPVM_ID 0x200F
+#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
+ MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvm = {
+ .id = MLXSW_REG_SPVM_ID,
+ .len = MLXSW_REG_SPVM_LEN,
+};
+
+/* reg_spvm_pt
+ * Priority tagged. If this bit is set, packets forwarded to the port with
+ * untagged VLAN membership (u bit is set) will be tagged with priority tag
+ * (VID=0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
+
+/* reg_spvm_pte
+ * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
+ * the pt bit will NOT be updated. To update the pt bit, pte must be set.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
+
+/* reg_spvm_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+
+/* reg_spvm_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
+
+/* reg_spvm_num_rec
+ * Number of records to update. Each record contains: i, e, u, vid.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
+
+/* reg_spvm_rec_i
+ * Ingress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
+ MLXSW_REG_SPVM_BASE_LEN, 14, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_e
+ * Egress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
+ MLXSW_REG_SPVM_BASE_LEN, 13, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_u
+ * Untagged - port is an untagged member - egress transmission uses untagged
+ * frames on VID<n>
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
+ MLXSW_REG_SPVM_BASE_LEN, 12, 1,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_vid
+ * VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
+ MLXSW_REG_SPVM_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool is_member, bool untagged)
+{
+ int size = vid_end - vid_begin + 1;
+ int i;
+
+ MLXSW_REG_ZERO(spvm, payload);
+ mlxsw_reg_spvm_local_port_set(payload, local_port);
+ mlxsw_reg_spvm_num_rec_set(payload, size);
+
+ for (i = 0; i < size; i++) {
+ mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
+ mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
+ mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
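+
+/* For example, making local port 3 an untagged member of VIDs 10..19
+ * packs ten records, one per VID:
+ *
+ *   char spvm_pl[MLXSW_REG_SPVM_LEN];
+ *
+ *   mlxsw_reg_spvm_pack(spvm_pl, 3, 10, 19, true, true);
+ */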
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+ .id = MLXSW_REG_SFGC_ID,
+ .len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_TYPE_RESERVED,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
+ MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
+ MLXSW_REG_SFGC_TYPE_MAX,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+ MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+ MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int flood_table)
+{
+ MLXSW_REG_ZERO(sfgc, payload);
+ mlxsw_reg_sfgc_type_set(payload, type);
+ mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfgc_table_type_set(payload, table_type);
+ mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+ mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
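+
+/* E.g., directing broadcast traffic of 802.1Q bridges to flood table 0,
+ * indexed by FID offset (a sketch):
+ *
+ *   char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ *
+ *   mlxsw_reg_sfgc_pack(sfgc_pl, MLXSW_REG_SFGC_TYPE_BROADCAST,
+ *                       MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ *                       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0);
+ */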
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+ .id = MLXSW_REG_SFTR_ID,
+ .len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_cpu_port_mask
+ * CPU port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+ unsigned int flood_table,
+ unsigned int index,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int range, u8 port, bool set)
+{
+ MLXSW_REG_ZERO(sftr, payload);
+ mlxsw_reg_sftr_swid_set(payload, 0);
+ mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+ mlxsw_reg_sftr_index_set(payload, index);
+ mlxsw_reg_sftr_table_type_set(payload, table_type);
+ mlxsw_reg_sftr_range_set(payload, range);
+ mlxsw_reg_sftr_port_set(payload, port, set);
+ mlxsw_reg_sftr_port_mask_set(payload, port, 1);
+}
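+
+/* E.g., adding local port 5 to a single entry of flood table 0, where
+ * 'fid_offset' is a hypothetical index into a FID-offset typed table:
+ *
+ *   char sftr_pl[MLXSW_REG_SFTR_LEN];
+ *
+ *   mlxsw_reg_sftr_pack(sftr_pl, 0, fid_offset,
+ *                       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 1, 5, true);
+ */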
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+ .id = MLXSW_REG_SPMLR_ID,
+ .len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+ MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+ MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ MLXSW_REG_ZERO(spmlr, payload);
+ mlxsw_reg_spmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spmlr_sub_port_set(payload, 0);
+ mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
+
+/* SVFA - Switch VID to FID Allocation Register
+ * --------------------------------------------
+ * Controls the VID to FID mapping and {Port, VID} to FID mapping for
+ * virtualized ports.
+ */
+#define MLXSW_REG_SVFA_ID 0x201C
+#define MLXSW_REG_SVFA_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_svfa = {
+ .id = MLXSW_REG_SVFA_ID,
+ .len = MLXSW_REG_SVFA_LEN,
+};
+
+/* reg_svfa_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
+
+/* reg_svfa_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_svfa_mt {
+ MLXSW_REG_SVFA_MT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+};
+
+/* reg_svfa_mapping_table
+ * Mapping table:
+ * 0 - VID to FID
+ * 1 - {Port, VID} to FID
+ * Access: Index
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
+
+/* reg_svfa_v
+ * Valid.
+ * Valid if set.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
+
+/* reg_svfa_fid
+ * Filtering ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
+
+/* reg_svfa_vid
+ * VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
+
+/* reg_svfa_counter_set_type
+ * Counter set type for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
+
+/* reg_svfa_counter_index
+ * Counter index for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
+
+static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, u16 vid)
+{
+ MLXSW_REG_ZERO(svfa, payload);
+ local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
+ mlxsw_reg_svfa_swid_set(payload, 0);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_mapping_table_set(payload, mt);
+ mlxsw_reg_svfa_v_set(payload, valid);
+ mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
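+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * mapping VID 10 to FID 5100 via the global VID to FID table; both
+ * numbers are arbitrary example values. Under
+ * MLXSW_REG_SVFA_MT_VID_TO_FID the pack helper above ignores local_port.
+ *
+ *	char svfa_pl[MLXSW_REG_SVFA_LEN];
+ *
+ *	mlxsw_reg_svfa_pack(svfa_pl, 0, MLXSW_REG_SVFA_MT_VID_TO_FID,
+ *			    true, 5100, 10);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(svfa), svfa_pl);
+ */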
+
+/* SVPE - Switch Virtual-Port Enabling Register
+ * --------------------------------------------
+ * Enables port virtualization.
+ */
+#define MLXSW_REG_SVPE_ID 0x201E
+#define MLXSW_REG_SVPE_LEN 0x4
+
+static const struct mlxsw_reg_info mlxsw_reg_svpe = {
+ .id = MLXSW_REG_SVPE_ID,
+ .len = MLXSW_REG_SVPE_LEN,
+};
+
+/* reg_svpe_local_port
+ * Local port number
+ * Access: Index
+ *
+ * Note: CPU port is not supported (uses VLAN mode only).
+ */
+MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+
+/* reg_svpe_vp_en
+ * Virtual port enable.
+ * 0 - Disable, VLAN mode (VID to FID).
+ * 1 - Enable, Virtual port mode ({Port, VID} to FID).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
+
+static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+ bool enable)
+{
+ MLXSW_REG_ZERO(svpe, payload);
+ mlxsw_reg_svpe_local_port_set(payload, local_port);
+ mlxsw_reg_svpe_vp_en_set(payload, enable);
+}
+
+/* SFMR - Switch FID Management Register
+ * -------------------------------------
+ * Creates and configures FIDs.
+ */
+#define MLXSW_REG_SFMR_ID 0x201F
+#define MLXSW_REG_SFMR_LEN 0x18
+
+static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
+ .id = MLXSW_REG_SFMR_ID,
+ .len = MLXSW_REG_SFMR_LEN,
+};
+
+enum mlxsw_reg_sfmr_op {
+ MLXSW_REG_SFMR_OP_CREATE_FID,
+ MLXSW_REG_SFMR_OP_DESTROY_FID,
+};
+
+/* reg_sfmr_op
+ * Operation.
+ * 0 - Create or edit FID.
+ * 1 - Destroy FID.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
+
+/* reg_sfmr_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+
+/* reg_sfmr_fid_offset
+ * FID offset.
+ * Used to point into the flooding table selected by the SFGC register if
+ * the table is of type FID-Offset. Otherwise, this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
+
+/* reg_sfmr_vtfp
+ * Valid Tunnel Flood Pointer.
+ * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
+
+/* reg_sfmr_nve_tunnel_flood_ptr
+ * Underlay Flooding and BC Pointer.
+ * Used as a pointer to the first entry of the group-based linked lists of
+ * flooding or BC entries (for NVE tunnels).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
+
+/* reg_sfmr_vv
+ * VNI Valid.
+ * If not set, then vni is reserved.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
+
+/* reg_sfmr_vni
+ * Virtual Network Identifier.
+ * Access: RW
+ *
+ * Note: A given VNI can only be assigned to one FID.
+ */
+MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+
+static inline void mlxsw_reg_sfmr_pack(char *payload,
+ enum mlxsw_reg_sfmr_op op, u16 fid,
+ u16 fid_offset)
+{
+ MLXSW_REG_ZERO(sfmr, payload);
+ mlxsw_reg_sfmr_op_set(payload, op);
+ mlxsw_reg_sfmr_fid_set(payload, fid);
+ mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
+ mlxsw_reg_sfmr_vtfp_set(payload, false);
+ mlxsw_reg_sfmr_vv_set(payload, false);
+}
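+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * creating FID 5100 with a zero offset into the flooding table; the FID
+ * number is an arbitrary example. The same pattern with
+ * MLXSW_REG_SFMR_OP_DESTROY_FID tears the FID down.
+ *
+ *	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ *
+ *	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, 5100, 0);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfmr), sfmr_pl);
+ */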
+
+/* SPVMLR - Switch Port VLAN MAC Learning Register
+ * -----------------------------------------------
+ * Controls the switch MAC learning policy per {Port, VID}.
+ */
+#define MLXSW_REG_SPVMLR_ID 0x2020
+#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
+ MLXSW_REG_SPVMLR_REC_LEN * \
+ MLXSW_REG_SPVMLR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
+ .id = MLXSW_REG_SPVMLR_ID,
+ .len = MLXSW_REG_SPVMLR_LEN,
+};
+
+/* reg_spvmlr_local_port
+ * Local ingress port.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+
+/* reg_spvmlr_num_rec
+ * Number of records to update.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
+
+/* reg_spvmlr_rec_learn_enable
+ * 0 - Disable learning for {Port, VID}.
+ * 1 - Enable learning for {Port, VID}.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
+ 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+/* reg_spvmlr_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
+ MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+ u16 vid_begin, u16 vid_end,
+ bool learn_enable)
+{
+ int num_rec = vid_end - vid_begin + 1;
+ int i;
+
+ WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
+
+ MLXSW_REG_ZERO(spvmlr, payload);
+ mlxsw_reg_spvmlr_local_port_set(payload, local_port);
+ mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
+
+ for (i = 0; i < num_rec; i++) {
+ mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
+ mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
+ }
+}
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+ .id = MLXSW_REG_PMLP_ID,
+ .len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pmlp, payload);
+ mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
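+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * querying the number of lanes assigned to a port. A width of zero
+ * means the port is unmapped and therefore unusable.
+ *
+ *	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ *	u8 width;
+ *
+ *	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
+ *	if (!err)
+ *		width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ */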
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+ .id = MLXSW_REG_PMTU_ID,
+ .len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When a port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported. Otherwise, the minimum among the max_mtu values of the
+ * different types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set the port to. Must be smaller than or equal to max_mtu.
+ * Note: If the port type is Infiniband, the port must be disabled when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+ u16 new_mtu)
+{
+ MLXSW_REG_ZERO(pmtu, payload);
+ mlxsw_reg_pmtu_local_port_set(payload, local_port);
+ mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+ mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+ mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
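+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * the typical two-step MTU flow, mirrored by mlxsw_sp_port_mtu_set()
+ * further below: query max_mtu first, then set admin_mtu.
+ *
+ *	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ *
+ *	mlxsw_reg_pmtu_pack(pmtu_pl, local_port, 0);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtu), pmtu_pl);
+ *	if (!err && new_mtu <= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl)) {
+ *		mlxsw_reg_pmtu_pack(pmtu_pl, local_port, new_mtu);
+ *		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmtu), pmtu_pl);
+ *	}
+ */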
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+ .id = MLXSW_REG_PTYS_ID,
+ .len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+ u32 proto_admin)
+{
+ MLXSW_REG_ZERO(ptys, payload);
+ mlxsw_reg_ptys_local_port_set(payload, local_port);
+ mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+ mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+ u32 *p_eth_proto_adm,
+ u32 *p_eth_proto_oper)
+{
+ if (p_eth_proto_cap)
+ *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+ if (p_eth_proto_adm)
+ *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+ if (p_eth_proto_oper)
+ *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
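+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * reading the supported, administrative and operational speeds of a
+ * port. Setting speeds follows the same pattern with a non-zero
+ * proto_admin and mlxsw_reg_write() instead of the query.
+ *
+ *	char ptys_pl[MLXSW_REG_PTYS_LEN];
+ *	u32 cap, adm, oper;
+ *
+ *	mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
+ *	if (!err)
+ *		mlxsw_reg_ptys_unpack(ptys_pl, &cap, &adm, &oper);
+ */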
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+ .id = MLXSW_REG_PPAD_ID,
+ .len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local_port should be 0 and mac[7:0] is reserved.
+ * HW will assign incremental MAC addresses per port.
+ * 1 - single_mac: the MAC address of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number. If single_base_mac = 0, then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+ u8 local_port)
+{
+ MLXSW_REG_ZERO(ppad, payload);
+ mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+ mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+ .id = MLXSW_REG_PAOS_ID,
+ .len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not bring the
+ * port up again until explicitly instructed by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+ enum mlxsw_port_admin_status status)
+{
+ MLXSW_REG_ZERO(paos, payload);
+ mlxsw_reg_paos_swid_set(payload, 0);
+ mlxsw_reg_paos_local_port_set(payload, local_port);
+ mlxsw_reg_paos_admin_status_set(payload, status);
+ mlxsw_reg_paos_oper_status_set(payload, 0);
+ mlxsw_reg_paos_ase_set(payload, 1);
+ mlxsw_reg_paos_ee_set(payload, 1);
+ mlxsw_reg_paos_e_set(payload, 1);
+}
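+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * administratively enabling a port, as done by
+ * mlxsw_sp_port_admin_status_set() further below.
+ *
+ *	char paos_pl[MLXSW_REG_PAOS_LEN];
+ *
+ *	mlxsw_reg_paos_pack(paos_pl, local_port, MLXSW_PORT_ADMIN_STATUS_UP);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(paos), paos_pl);
+ */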
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+ .id = MLXSW_REG_PPCNT_ID,
+ .len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * Switch partition ID to associate the port with.
+ * For HCA: must always be 0.
+ * Switch partitions are numbered from 0 to 7, inclusive.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per priority counters,
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per traffic class counters,
+ * valid values: 0..cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+ 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+ 0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+ 0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+ 0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+ 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+ 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+ 0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+ 0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+ 0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+ 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+ 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+ 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+ 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+ 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+ 0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+ 0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+ 0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+ 0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+ 0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(ppcnt, payload);
+ mlxsw_reg_ppcnt_swid_set(payload, 0);
+ mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+ mlxsw_reg_ppcnt_pnat_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, 0);
+ mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
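+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * reading a single IEEE 802.3 counter. The pack helper above selects
+ * counter group 0 (IEEE 802.3).
+ *
+ *	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ *	u64 rx_frames;
+ *
+ *	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);
+ *	if (!err)
+ *		rx_frames = mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ */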
+
+/* PBMC - Port Buffer Management Control Register
+ * ----------------------------------------------
+ * The PBMC register configures and retrieves the port packet buffer
+ * allocation for different Prios, and the Pause threshold management.
+ */
+#define MLXSW_REG_PBMC_ID 0x500C
+#define MLXSW_REG_PBMC_LEN 0x68
+
+static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
+ .id = MLXSW_REG_PBMC_ID,
+ .len = MLXSW_REG_PBMC_LEN,
+};
+
+/* reg_pbmc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+
+/* reg_pbmc_xoff_timer_value
+ * When device generates a pause frame, it uses this value as the pause
+ * timer (time for the peer port to pause in quota-512 bit time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
+
+/* reg_pbmc_xoff_refresh
+ * The time before a new pause frame should be sent to refresh the pause
+ * state. Uses the same units as xoff_timer_value above (in quota-512 bit
+ * time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+
+/* reg_pbmc_buf_lossy
+ * The field indicates if the buffer is lossy.
+ * 0 - Lossless
+ * 1 - Lossy
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_epsb
+ * Eligible for Port Shared buffer.
+ * If epsb is set, packets assigned to the buffer are allowed to use the
+ * port shared buffer.
+ * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_size
+ * The part of the packet buffer array allocated for the specific buffer.
+ * Units are represented in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+
+static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+ u16 xoff_timer_value, u16 xoff_refresh)
+{
+ MLXSW_REG_ZERO(pbmc, payload);
+ mlxsw_reg_pbmc_local_port_set(payload, local_port);
+ mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
+ mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
+}
+
+static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
+ int buf_index,
+ u16 size)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+}
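+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * read-modify-write of the port buffer configuration, sizing lossy
+ * buffer 0 to an example value of 1000 cells.
+ *
+ *	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ *
+ *	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0, 0);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
+ *	if (!err) {
+ *		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, 1000);
+ *		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
+ *	}
+ */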
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500D
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+ .id = MLXSW_REG_PSPA_ID,
+ .len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+ MLXSW_REG_ZERO(pspa, payload);
+ mlxsw_reg_pspa_swid_set(payload, swid);
+ mlxsw_reg_pspa_local_port_set(payload, local_port);
+ mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+ .id = MLXSW_REG_HTGT_ID,
+ .len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+enum mlxsw_reg_htgt_trap_group {
+ MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_RX,
+ MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
+};
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+ MLXSW_REG_HTGT_POLICER_DISABLE,
+ MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload,
+ enum mlxsw_reg_htgt_trap_group group)
+{
+ u8 swid, rdq;
+
+ MLXSW_REG_ZERO(htgt, payload);
+ switch (group) {
+ case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
+ swid = MLXSW_PORT_SWID_ALL_SWIDS;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_RX:
+ swid = 0;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+ break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
+ swid = 0;
+ rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
+ break;
+ }
+ mlxsw_reg_htgt_swid_set(payload, swid);
+ mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+ mlxsw_reg_htgt_trap_group_set(payload, group);
+ mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+ mlxsw_reg_htgt_pid_set(payload, 0);
+ mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+ mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+ mlxsw_reg_htgt_priority_set(payload, 0);
+ mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+ mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
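+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * directing the RX trap group to its dedicated receive descriptor
+ * queue; the pack helper above fills in the swid and RDQ per group.
+ *
+ *	char htgt_pl[MLXSW_REG_HTGT_LEN];
+ *
+ *	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ */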
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+ .id = MLXSW_REG_HPKT_ID,
+ .len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+ MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+ MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+ MLXSW_REG_HPKT_ACTION_FORWARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_DISCARD,
+ MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+ MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+ MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+ MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
+{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+
+ MLXSW_REG_ZERO(hpkt, payload);
+ mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+ mlxsw_reg_hpkt_action_set(payload, action);
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_ETHEMAD:
+ case MLXSW_TRAP_ID_PUDE:
+ trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
+ break;
+ default:
+ trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
+ break;
+ }
+ mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+ mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+ mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
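+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * registering for PUDE events. Event trap IDs must use the forward
+ * action, as noted above; the pack helper maps PUDE to the EMAD trap
+ * group.
+ *
+ *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *
+ *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ *			    MLXSW_TRAP_ID_PUDE);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ */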
+
+/* SBPR - Shared Buffer Pools Register
+ * -----------------------------------
+ * The SBPR configures and retrieves the shared buffer pools and configuration.
+ */
+#define MLXSW_REG_SBPR_ID 0xB001
+#define MLXSW_REG_SBPR_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
+ .id = MLXSW_REG_SBPR_ID,
+ .len = MLXSW_REG_SBPR_LEN,
+};
+
+enum mlxsw_reg_sbpr_dir {
+ MLXSW_REG_SBPR_DIR_INGRESS,
+ MLXSW_REG_SBPR_DIR_EGRESS,
+};
+
+/* reg_sbpr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
+
+/* reg_sbpr_pool
+ * Pool index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
+
+/* reg_sbpr_size
+ * Pool size in buffer cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
+
+enum mlxsw_reg_sbpr_mode {
+ MLXSW_REG_SBPR_MODE_STATIC,
+ MLXSW_REG_SBPR_MODE_DYNAMIC,
+};
+
+/* reg_sbpr_mode
+ * Pool quota calculation mode.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
+
+static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
+ enum mlxsw_reg_sbpr_dir dir,
+ enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+ MLXSW_REG_ZERO(sbpr, payload);
+ mlxsw_reg_sbpr_pool_set(payload, pool);
+ mlxsw_reg_sbpr_dir_set(payload, dir);
+ mlxsw_reg_sbpr_mode_set(payload, mode);
+ mlxsw_reg_sbpr_size_set(payload, size);
+}
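+
+/* Illustrative usage sketch (comment only, not part of this patch):
+ * creating ingress shared buffer pool 0 with a static quota and an
+ * example size of 10000 cells.
+ *
+ *	char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ *
+ *	mlxsw_reg_sbpr_pack(sbpr_pl, 0, MLXSW_REG_SBPR_DIR_INGRESS,
+ *			    MLXSW_REG_SBPR_MODE_STATIC, 10000);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sbpr), sbpr_pl);
+ */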
+
+/* SBCM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBCM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-PG, including the binding to pool
+ * and definition of the associated quota.
+ */
+#define MLXSW_REG_SBCM_ID 0xB002
+#define MLXSW_REG_SBCM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
+ .id = MLXSW_REG_SBCM_ID,
+ .len = MLXSW_REG_SBCM_LEN,
+};
+
+/* reg_sbcm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+
+/* reg_sbcm_pg_buff
+ * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
+ * For PG buffer: range is 0..cap_max_pg_buffers - 1
+ * For traffic class: range is 0..cap_max_tclass - 1
+ * Note that when the traffic class is in MC aware mode, the MC aware
+ * traffic classes cannot be configured.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
+
+enum mlxsw_reg_sbcm_dir {
+ MLXSW_REG_SBCM_DIR_INGRESS,
+ MLXSW_REG_SBCM_DIR_EGRESS,
+};
+
+/* reg_sbcm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
+
+/* reg_sbcm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+
+/* reg_sbcm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting the
+ * following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbcm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbcm_dir dir,
+ u32 min_buff, u32 max_buff, u8 pool)
+{
+ MLXSW_REG_ZERO(sbcm, payload);
+ mlxsw_reg_sbcm_local_port_set(payload, local_port);
+ mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
+ mlxsw_reg_sbcm_dir_set(payload, dir);
+ mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbcm_pool_set(payload, pool);
+}
+
+/* SBPM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBPM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-Pool, including the definition
+ * of the associated quota.
+ */
+#define MLXSW_REG_SBPM_ID 0xB003
+#define MLXSW_REG_SBPM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
+ .id = MLXSW_REG_SBPM_ID,
+ .len = MLXSW_REG_SBPM_LEN,
+};
+
+/* reg_sbpm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+
+/* reg_sbpm_pool
+ * The pool associated to quota counting on the local_port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
+
+enum mlxsw_reg_sbpm_dir {
+ MLXSW_REG_SBPM_DIR_INGRESS,
+ MLXSW_REG_SBPM_DIR_EGRESS,
+};
+
+/* reg_sbpm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+
+/* reg_sbpm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
+
+/* reg_sbpm_max_buff
+ * When the pool is configured as static, this is the maximum buffer
+ * size for the limiter, in cells.
+ * When the pool is configured as dynamic, max_buff holds the "alpha"
+ * parameter, supporting the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+ enum mlxsw_reg_sbpm_dir dir,
+ u32 min_buff, u32 max_buff)
+{
+ MLXSW_REG_ZERO(sbpm, payload);
+ mlxsw_reg_sbpm_local_port_set(payload, local_port);
+ mlxsw_reg_sbpm_pool_set(payload, pool);
+ mlxsw_reg_sbpm_dir_set(payload, dir);
+ mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
+}
+
+/* SBMM - Shared Buffer Multicast Management Register
+ * --------------------------------------------------
+ * The SBMM register configures and retrieves the shared buffer allocation
+ * and configuration for MC packets according to Switch-Priority, including
+ * the binding to pool and definition of the associated quota.
+ */
+#define MLXSW_REG_SBMM_ID 0xB004
+#define MLXSW_REG_SBMM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
+ .id = MLXSW_REG_SBMM_ID,
+ .len = MLXSW_REG_SBMM_LEN,
+};
+
+/* reg_sbmm_prio
+ * Switch Priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
+
+/* reg_sbmm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
+
+/* reg_sbmm_max_buff
+ * When the pool associated with the switch-priority is configured as
+ * static, this is the maximum buffer size for the limiter, in cells.
+ * When the pool is configured as dynamic, max_buff holds the "alpha"
+ * parameter, supporting the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbmm_pool
+ * Association of the port-priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
+ u32 max_buff, u8 pool)
+{
+ MLXSW_REG_ZERO(sbmm, payload);
+ mlxsw_reg_sbmm_prio_set(payload, prio);
+ mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
+ mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbmm_pool_set(payload, pool);
+}
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+ switch (reg_id) {
+ case MLXSW_REG_SGCR_ID:
+ return "SGCR";
+ case MLXSW_REG_SPAD_ID:
+ return "SPAD";
+ case MLXSW_REG_SSPR_ID:
+ return "SSPR";
+ case MLXSW_REG_SFDAT_ID:
+ return "SFDAT";
+ case MLXSW_REG_SFD_ID:
+ return "SFD";
+ case MLXSW_REG_SFN_ID:
+ return "SFN";
+ case MLXSW_REG_SPMS_ID:
+ return "SPMS";
+ case MLXSW_REG_SPVID_ID:
+ return "SPVID";
+ case MLXSW_REG_SPVM_ID:
+ return "SPVM";
+ case MLXSW_REG_SFGC_ID:
+ return "SFGC";
+ case MLXSW_REG_SFTR_ID:
+ return "SFTR";
+ case MLXSW_REG_SPMLR_ID:
+ return "SPMLR";
+ case MLXSW_REG_SVFA_ID:
+ return "SVFA";
+ case MLXSW_REG_SVPE_ID:
+ return "SVPE";
+ case MLXSW_REG_SFMR_ID:
+ return "SFMR";
+ case MLXSW_REG_SPVMLR_ID:
+ return "SPVMLR";
+ case MLXSW_REG_PMLP_ID:
+ return "PMLP";
+ case MLXSW_REG_PMTU_ID:
+ return "PMTU";
+ case MLXSW_REG_PTYS_ID:
+ return "PTYS";
+ case MLXSW_REG_PPAD_ID:
+ return "PPAD";
+ case MLXSW_REG_PAOS_ID:
+ return "PAOS";
+ case MLXSW_REG_PPCNT_ID:
+ return "PPCNT";
+ case MLXSW_REG_PBMC_ID:
+ return "PBMC";
+ case MLXSW_REG_PSPA_ID:
+ return "PSPA";
+ case MLXSW_REG_HTGT_ID:
+ return "HTGT";
+ case MLXSW_REG_HPKT_ID:
+ return "HPKT";
+ case MLXSW_REG_SBPR_ID:
+ return "SBPR";
+ case MLXSW_REG_SBCM_ID:
+ return "SBCM";
+ case MLXSW_REG_SBPM_ID:
+ return "SBPM";
+ case MLXSW_REG_SBMM_ID:
+ return "SBMM";
+ default:
+ return "*UNKNOWN*";
+ }
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ * into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not bring the
+ * port up again until explicitly instructed by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
new file mode 100644
index 000000000000..3be4a2355ead
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -0,0 +1,1949 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
+ return 0;
+}
+
+static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_up)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool *p_is_up)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+ u8 oper_status;
+ int err;
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+ if (err)
+ return err;
+ oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+ return 0;
+}
+
+static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ int err;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ MLXSW_SP_VFID_BASE + vfid, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+ if (err)
+ return err;
+
+ set_bit(vfid, mlxsw_sp->active_vfids);
+ return 0;
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ clear_bit(vfid, mlxsw_sp->active_vfids);
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+ MLXSW_SP_VFID_BASE + vfid, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ unsigned char *addr)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+
+ mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
+ mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
+
+ ether_addr_copy(addr, mlxsw_sp->base_mac);
+ addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, enum mlxsw_reg_spms_state state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char svpe_pl[MLXSW_REG_SVPE_LEN];
+
+ mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
+}
+
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
+ fid, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, bool learn_enable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvmlr_pl;
+ int err;
+
+ spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
+ if (!spvmlr_pl)
+ return -ENOMEM;
+ mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+ learn_enable);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
+ kfree(spvmlr_pl);
+ return err;
+}
+
+static int
+mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool *p_usable)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_usable = !!mlxsw_reg_pmlp_width_get(pmlp_pl);
+ return 0;
+}
+
+static int mlxsw_sp_port_open(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err)
+ return err;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mlxsw_sp_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+}
+
+static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sp_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+ struct sk_buff *skb_orig = skb;
+
+ skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ if (eth_skb_pad(skb)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return NETDEV_TX_OK;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, &tx_info);
+ len = skb->len;
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
+ if (err)
+ return err;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ if (err)
+ return err;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *spvm_pl;
+ int err;
+
+ spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
+ if (!spvm_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
+ vid_end, is_member, untagged);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
+ kfree(spvm_pl);
+ return err;
+}
+
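+/* Transition the port to Virtual mode: install an explicit
+ * {Port, VID} to FID mapping for every active 802.1Q VLAN and only
+ * then flip the port's mode. Mappings are rolled back on error.
+ */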
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ u16 vid, last_visited_vid;
+ int err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
+ vid);
+ if (err) {
+ last_visited_vid = vid;
+ goto err_port_vid_to_fid_set;
+ }
+ }
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+ if (err) {
+ last_visited_vid = VLAN_N_VID;
+ goto err_port_vid_to_fid_set;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
+ vid);
+ return err;
+}
+
+static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ u16 vid;
+ int err;
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ if (err)
+ return err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
+ vid, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+ u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *sftr_pl;
+ int err;
+
+ /* VLAN 0 is added to the HW filter when the device goes up, but it
+ * is reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ netdev_warn(dev, "VID=%d already configured\n", vid);
+ return 0;
+ }
+
+ if (!test_bit(vid, mlxsw_sp->active_vfids)) {
+ err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create vFID=%d\n",
+ MLXSW_SP_VFID_BASE + vid);
+ return err;
+ }
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl) {
+ err = -ENOMEM;
+ goto err_flood_table_alloc;
+ }
+ mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
+ MLXSW_PORT_CPU_PORT, true);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ kfree(sftr_pl);
+ if (err) {
+ netdev_err(dev, "Failed to configure flood table\n");
+ goto err_flood_table_config;
+ }
+ }
+
+ /* In case we fail in the following steps, we intentionally do not
+ * destroy the associated vFID.
+ */
+
+ /* When adding the first VLAN interface on a bridged port we need to
+ * transition all the active 802.1Q bridge VLANs to use explicit
+ * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
+ */
+ if (!mlxsw_sp_port->nr_vfids) {
+ err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+ if (err) {
+ netdev_err(dev, "Failed to set to Virtual mode\n");
+ return err;
+ }
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ true, MLXSW_SP_VFID_BASE + vid, vid);
+ if (err) {
+ netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
+ vid, MLXSW_SP_VFID_BASE + vid);
+ goto err_port_vid_to_fid_set;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+ if (err) {
+ netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+ goto err_port_vid_learning_set;
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
+ if (err) {
+ netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+ vid);
+ goto err_port_add_vid;
+ }
+
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+ goto err_port_stp_state_set;
+ }
+
+ mlxsw_sp_port->nr_vfids++;
+ set_bit(vid, mlxsw_sp_port->active_vfids);
+
+ return 0;
+
+err_flood_table_config:
+err_flood_table_alloc:
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
+ return err;
+
+err_port_stp_state_set:
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_add_vid:
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
+ MLXSW_SP_VFID_BASE + vid, vid);
+err_port_vid_to_fid_set:
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ return err;
+}
+
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ /* VLAN 0 is removed from the HW filter when the device goes down,
+ * but it is reserved in our case, so simply return.
+ */
+ if (!vid)
+ return 0;
+
+ if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
+ netdev_warn(dev, "VID=%d does not exist\n", vid);
+ return 0;
+ }
+
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+ MLXSW_REG_SPMS_STATE_DISCARDING);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+ if (err) {
+ netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+ vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+ if (err) {
+ netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
+ return err;
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ false, MLXSW_SP_VFID_BASE + vid,
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
+ vid, MLXSW_SP_VFID_BASE + vid);
+ return err;
+ }
+
+ /* When removing the last VLAN interface on a bridged port we need to
+ * transition all active 802.1Q bridge VLANs to use VID to FID
+ * mappings and set port's mode to VLAN mode.
+ */
+ if (mlxsw_sp_port->nr_vfids == 1) {
+ err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+ if (err) {
+ netdev_err(dev, "Failed to set to VLAN mode\n");
+ return err;
+ }
+ }
+
+ mlxsw_sp_port->nr_vfids--;
+ clear_bit(vid, mlxsw_sp_port->active_vfids);
+
+ return 0;
+}
+
+static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
+ .ndo_open = mlxsw_sp_port_open,
+ .ndo_stop = mlxsw_sp_port_stop,
+ .ndo_start_xmit = mlxsw_sp_port_xmit,
+ .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
+ .ndo_change_mtu = mlxsw_sp_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
+ .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+};
+
+static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sp->bus_info->fw_rev.major,
+ mlxsw_sp->bus_info->fw_rev.minor,
+ mlxsw_sp->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
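+/* Binding between an ethtool statistics string and the PPCNT register
+ * field that backs it.
+ */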
+struct mlxsw_sp_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+
+static void mlxsw_sp_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
+ data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SP_PORT_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
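+/* Translation table between PTYS Ethernet protocol bits and ethtool
+ * link modes. Entries without an ethtool equivalent only carry a
+ * speed, used when reporting the operational speed.
+ */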
+struct mlxsw_sp_port_link_mode {
+ u32 mask;
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+};
+
+static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .speed = 25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .speed = 50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .speed = 100000,
+ },
+};
+
+#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
+
+static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return SUPPORTED_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ return SUPPORTED_Backplane;
+ return 0;
+}
+
+static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+ modes |= mlxsw_sp_port_link_mode[i].supported;
+ }
+ return modes;
+}
+
+static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+ modes |= mlxsw_sp_port_link_mode[i].advertised;
+ }
+ return modes;
+}
+
+static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_cmd *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
+ speed = mlxsw_sp_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return PORT_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+ return PORT_DA;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+ return PORT_NONE;
+
+ return PORT_OTHER;
+}
+
+static int mlxsw_sp_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_oper;
+ int err;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+
+ cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
+ mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
+ mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+ cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
+ cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sp_to_ptys_speed(u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sp_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int mlxsw_sp_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 speed;
+ u32 eth_proto_new;
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ bool is_up;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+ mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
+ mlxsw_sp_to_ptys_speed(speed);
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+ netdev_err(dev, "Not supported proto admin requested");
+ return -EINVAL;
+ }
+ if (eth_proto_new == eth_proto_admin)
+ return 0;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to set proto admin");
+ return err;
+ }
+
+ err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
+ if (err) {
+ netdev_err(dev, "Failed to get oper status");
+ return err;
+ }
+ if (!is_up)
+ return 0;
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_sp_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlxsw_sp_port_get_strings,
+ .get_ethtool_stats = mlxsw_sp_port_get_stats,
+ .get_sset_count = mlxsw_sp_port_get_sset_count,
+ .get_settings = mlxsw_sp_port_get_settings,
+ .set_settings = mlxsw_sp_port_set_settings,
+};
+
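+/* Create a port netdev: allocate it together with its per-CPU stats,
+ * assign a MAC address, map the port to SWID 0, set the default MTU
+ * and buffers, and finally register it with the networking core.
+ */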
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *dev;
+ bool usable;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
+ if (!dev)
+ return -ENOMEM;
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp_port->dev = dev;
+ mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->learning = 1;
+ mlxsw_sp_port->learning_sync = 1;
+ mlxsw_sp_port->uc_flood = 1;
+ mlxsw_sp_port->pvid = 1;
+
+ mlxsw_sp_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
+ if (!mlxsw_sp_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+
+ err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
+ mlxsw_sp_port->local_port);
+ goto err_dev_addr_init;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ /* Each packet needs to have a Tx header (metadata) on top of all
+ * other headers.
+ */
+ dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_module_check;
+ }
+
+ if (!usable) {
+ dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+ mlxsw_sp_port->local_port);
+ goto port_not_usable;
+ }
+
+ err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_buffers_init;
+ }
+
+ mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sp_port->local_port);
+ goto err_register_netdev;
+ }
+
+ err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_vlan_init;
+
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+ return 0;
+
+err_port_vlan_init:
+ unregister_netdev(dev);
+err_register_netdev:
+err_port_buffers_init:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_init:
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+ return err;
+}
+
+static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 vfid;
+
+ for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+}
+
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (!mlxsw_sp_port)
+ return;
+ mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+ unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ free_percpu(mlxsw_sp_port->pcpu_stats);
+ free_netdev(mlxsw_sp_port->dev);
+}
+
+static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->ports);
+}
+
+static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+ mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sp->ports)
+ return -ENOMEM;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, i);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, i);
+ kfree(mlxsw_sp->ports);
+ return err;
+}
+
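+/* PUDE (Port Up/Down Event) handler: mirror the reported operational
+ * state of the port to the netdev's carrier state.
+ */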
+static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sp_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sp_port->dev);
+ } else {
+ netdev_info(mlxsw_sp_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sp_port->dev);
+ }
+}
+
+static struct mlxsw_event_listener mlxsw_sp_pude_event = {
+ .func = mlxsw_sp_pude_event_func,
+ .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sp_pude_event;
+ break;
+ }
+ err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_event_trap_set;
+
+ return 0;
+
+err_event_trap_set:
+ mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sp_pude_event;
+ break;
+ }
+ mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+}
+
+static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
+
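+/* Control packets that must be trapped to the CPU instead of being
+ * flooded in HW. All of them are injected into the kernel's Rx path
+ * through the same listener function.
+ */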
+static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_FDB_MC,
+ },
+ /* Traps for specific L2 packet types, not trapped as FDB MC */
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_STP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LACP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_EAPOL,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LLDP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MMRP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MVRP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RPVST,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_DHCP,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+ },
+};
+
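+/* Register the Rx trap groups and listeners. On failure, traps that
+ * were already set are restored to the default FORWARD action.
+ */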
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+ err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ if (err)
+ goto err_rx_listener_register;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ mlxsw_sp_rx_listener[i].trap_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_rx_trap_set;
+ }
+ return 0;
+
+err_rx_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sp_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ }
+ return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sp_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+ &mlxsw_sp_rx_listener[i],
+ mlxsw_sp);
+ }
+}
+
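+/* Bind a flood group to a flood table: vFIDs use the per-FID table,
+ * while 802.1Q FIDs use offset-based tables, split between unknown
+ * unicast and unregistered multicast/broadcast traffic.
+ */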
+static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
+ enum mlxsw_reg_sfgc_type type,
+ enum mlxsw_reg_sfgc_bridge_type bridge_type)
+{
+ enum mlxsw_flood_table_type table_type;
+ enum mlxsw_sp_flood_table flood_table;
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
+ if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
+ flood_table = 0;
+ } else {
+ table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+ if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+ flood_table = MLXSW_SP_FLOOD_TABLE_UC;
+ else
+ flood_table = MLXSW_SP_FLOOD_TABLE_BM;
+ }
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
+ flood_table);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
+}
+
+static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int type, err;
+
+ /* For non-offloaded netdevs, flood all traffic types to the CPU
+ * port.
+ */
+ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+ if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+ continue;
+
+ err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
+ if (err)
+ return err;
+ }
+
+ /* For bridged ports, use one flooding table for unknown unicast
+ * traffic and a second table for unregistered multicast and
+ * broadcast.
+ */
+ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+ if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+ continue;
+
+ err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ mlxsw_sp->core = mlxsw_core;
+ mlxsw_sp->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sp_base_mac_get(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
+ return err;
+ }
+
+ err = mlxsw_sp_ports_create(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
+ err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
+ goto err_event_register;
+ }
+
+ err = mlxsw_sp_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
+ goto err_rx_listener_register;
+ }
+
+ err = mlxsw_sp_flood_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
+ goto err_flood_init;
+ }
+
+ err = mlxsw_sp_buffers_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
+ goto err_buffers_init;
+ }
+
+ err = mlxsw_sp_switchdev_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
+ goto err_switchdev_init;
+ }
+
+ return 0;
+
+err_switchdev_init:
+err_buffers_init:
+err_flood_init:
+ mlxsw_sp_traps_fini(mlxsw_sp);
+err_rx_listener_register:
+ mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+ mlxsw_sp_ports_remove(mlxsw_sp);
+err_ports_create:
+ mlxsw_sp_vfids_fini(mlxsw_sp);
+ return err;
+}
+
+static void mlxsw_sp_fini(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_traps_fini(mlxsw_sp);
+ mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+ mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_vfids_fini(mlxsw_sp);
+}
+
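+/* Resource limits requested from the device at init time. A field is
+ * only applied when its corresponding used_* flag is set.
+ */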
+static struct mlxsw_config_profile mlxsw_sp_config_profile = {
+ .used_max_vepa_channels = 1,
+ .max_vepa_channels = 0,
+ .used_max_lag = 1,
+ .max_lag = 64,
+ .used_max_port_per_lag = 1,
+ .max_port_per_lag = 16,
+ .used_max_mid = 1,
+ .max_mid = 7000,
+ .used_max_pgt = 1,
+ .max_pgt = 0,
+ .used_max_system_port = 1,
+ .max_system_port = 64,
+ .used_max_vlan_groups = 1,
+ .max_vlan_groups = 127,
+ .used_max_regions = 1,
+ .max_regions = 400,
+ .used_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .max_fid_offset_flood_tables = 2,
+ .fid_offset_flood_table_size = VLAN_N_VID - 1,
+ .max_fid_flood_tables = 1,
+ .fid_flood_table_size = VLAN_N_VID,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
+static struct mlxsw_driver mlxsw_sp_driver = {
+ .kind = MLXSW_DEVICE_KIND_SPECTRUM,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp_init,
+ .fini = mlxsw_sp_fini,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp_config_profile,
+};
+
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* When a port is not bridged, untagged packets are tagged with
+ * PVID=VID=1, thereby creating an implicit VLAN interface in
+ * the device. Remove it and let bridge code take care of its
+ * own VLANs.
+ */
+ err = mlxsw_sp_port_kill_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to remove VID 1\n");
+
+ return err;
+}
+
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Add an implicit VLAN interface in the device, so that untagged
+ * packets will be classified to the default vFID.
+ */
+ err = mlxsw_sp_port_add_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to add VID 1\n");
+
+ return err;
+}
+
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ return !mlxsw_sp->master_bridge.dev ||
+ mlxsw_sp->master_bridge.dev == br_dev;
+}
+
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ mlxsw_sp->master_bridge.dev = br_dev;
+ mlxsw_sp->master_bridge.ref_count++;
+}
+
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ if (--mlxsw_sp->master_bridge.ref_count == 0)
+ mlxsw_sp->master_bridge.dev = NULL;
+}
+
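+/* netdev notifier used to track enslavement of port netdevs to a
+ * bridge. Joining is vetoed at PRECHANGEUPPER if the port is being
+ * put under a second bridge.
+ */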
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct net_device *upper_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ if (!mlxsw_sp_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ mlxsw_sp_port = netdev_priv(dev);
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ info = ptr;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ /* HW limitation forbids putting a port in multiple bridges. */
+ if (info->master && info->linking &&
+ netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
+ return NOTIFY_BAD;
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->master &&
+ netif_is_bridge_master(upper_dev)) {
+ if (info->linking) {
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to join bridge\n");
+ mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
+ mlxsw_sp_port->bridged = 1;
+ } else {
+ err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+ if (err)
+ netdev_err(dev, "Failed to leave bridge\n");
+ mlxsw_sp_port->bridged = 0;
+ mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
+ }
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_netdevice_event,
+};
+
+static int __init mlxsw_sp_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+ if (err)
+ goto err_core_driver_register;
+ return 0;
+
+err_core_driver_register:
+ unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ return err;
+}
+
+static void __exit mlxsw_sp_module_exit(void)
+{
+ mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+}
+
+module_init(mlxsw_sp_module_init);
+module_exit(mlxsw_sp_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Spectrum driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
new file mode 100644
index 000000000000..4365c8bccc6d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -0,0 +1,122 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_H
+#define _MLXSW_SPECTRUM_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+
+#include "core.h"
+
+#define MLXSW_SP_VFID_BASE VLAN_N_VID
+
+struct mlxsw_sp_port;
+
+struct mlxsw_sp {
+ unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+ struct mlxsw_sp_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ unsigned char base_mac[ETH_ALEN];
+ struct {
+ struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ unsigned int interval; /* ms */
+ } fdb_notify;
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ u32 ageing_time;
+ struct {
+ struct net_device *dev;
+ unsigned int ref_count;
+ } master_bridge;
+};
+
+struct mlxsw_sp_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct mlxsw_sp_port {
+ struct net_device *dev;
+ struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sp *mlxsw_sp;
+ u8 local_port;
+ u8 stp_state;
+ u8 learning:1,
+ learning_sync:1,
+ uc_flood:1,
+ bridged:1;
+ u16 pvid;
+ /* 802.1Q bridge VLANs */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /* VLAN interfaces */
+ unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 nr_vfids;
+};
+
+enum mlxsw_sp_flood_table {
+ MLXSW_SP_FLOOD_TABLE_UC,
+ MLXSW_SP_FLOOD_TABLE_BM,
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+ u16 vid);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+ u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+ u16 vid);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
new file mode 100644
index 000000000000..d59195e3f7fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -0,0 +1,422 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "port.h"
+#include "reg.h"
+
+struct mlxsw_sp_pb {
+ u8 index;
+ u16 size;
+};
+
+#define MLXSW_SP_PB(_index, _size) \
+ { \
+ .index = _index, \
+ .size = _size, \
+ }
+
+static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
+ MLXSW_SP_PB(0, 208),
+ MLXSW_SP_PB(1, 208),
+ MLXSW_SP_PB(2, 208),
+ MLXSW_SP_PB(3, 208),
+ MLXSW_SP_PB(4, 208),
+ MLXSW_SP_PB(5, 208),
+ MLXSW_SP_PB(6, 208),
+ MLXSW_SP_PB(7, 208),
+ MLXSW_SP_PB(9, 208),
+};
+
+#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+
+static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int i;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
+ 0xffff, 0xffff / 2);
+ for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+ const struct mlxsw_sp_pb *pb;
+
+ pb = &mlxsw_sp_pbs[i];
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+ }
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(pbmc), pbmc_pl);
+}
+
+#define MLXSW_SP_SB_BYTES_PER_CELL 96
+
+struct mlxsw_sp_sb_pool {
+ u8 pool;
+ enum mlxsw_reg_sbpr_dir dir;
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+#define MLXSW_SP_SB_POOL_INGRESS_SIZE \
+ ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \
+ MLXSW_SP_SB_BYTES_PER_CELL)
+#define MLXSW_SP_SB_POOL_EGRESS_SIZE \
+ ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \
+ MLXSW_SP_SB_BYTES_PER_CELL)
+
+#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \
+ { \
+ .pool = _pool, \
+ .dir = _dir, \
+ .mode = _mode, \
+ .size = _size, \
+ }
+
+#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \
+ MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \
+ MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \
+ MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \
+ MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
+ MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
+ MLXSW_SP_SB_POOL_INGRESS(1, 0),
+ MLXSW_SP_SB_POOL_INGRESS(2, 0),
+ MLXSW_SP_SB_POOL_INGRESS(3, 0),
+ MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+ MLXSW_SP_SB_POOL_EGRESS(1, 0),
+ MLXSW_SP_SB_POOL_EGRESS(2, 0),
+ MLXSW_SP_SB_POOL_EGRESS(3, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+};
+
+#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+
+static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
+ const struct mlxsw_sp_sb_pool *pool;
+
+ pool = &mlxsw_sp_sb_pools[i];
+ mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
+ pool->mode, pool->size);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+struct mlxsw_sp_sb_cm {
+ union {
+ u8 pg;
+ u8 tc;
+ } u;
+ enum mlxsw_reg_sbcm_dir dir;
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+};
+
+#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \
+ { \
+ .u.pg = _pg_tc, \
+ .dir = _dir, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
+ }
+
+#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \
+ MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \
+ _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \
+ MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \
+ _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \
+ MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
+ MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
+ MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
+ MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
+ MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+ MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
+ MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+};
+
+#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
+ MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+};
+
+#define MLXSW_SP_CPU_PORT_SB_CMS_LEN \
+ ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
+
+static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_sb_cm *cms,
+ size_t cms_len)
+{
+ char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < cms_len; i++) {
+ const struct mlxsw_sp_sb_cm *cm;
+
+ cm = &cms[i];
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
+ cm->min_buff, cm->max_buff, cm->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
+ MLXSW_SP_SB_CMS_LEN);
+}
+
+static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
+ MLXSW_SP_CPU_PORT_SB_CMS_LEN);
+}
+
+struct mlxsw_sp_sb_pm {
+ u8 pool;
+ enum mlxsw_reg_sbpm_dir dir;
+ u32 min_buff;
+ u32 max_buff;
+};
+
+#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \
+ { \
+ .pool = _pool, \
+ .dir = _dir, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ }
+
+#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \
+ MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \
+ _min_buff, _max_buff)
+
+#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \
+ MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \
+ _min_buff, _max_buff)
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+ MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
+ MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
+ MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
+ MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
+ MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
+ MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+};
+
+#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+ const struct mlxsw_sp_sb_pm *pm;
+
+ pm = &mlxsw_sp_sb_pms[i];
+ mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
+ pm->pool, pm->dir,
+ pm->min_buff, pm->max_buff);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(sbpm), sbpm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+struct mlxsw_sp_sb_mm {
+ u8 prio;
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+};
+
+#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \
+ { \
+ .prio = _prio, \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
+ }
+
+static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
+ MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
+
+static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sbmm_pl[MLXSW_REG_SBMM_LEN];
+ int i;
+ int err;
+
+ for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+ const struct mlxsw_sp_sb_mm *mc;
+
+ mc = &mlxsw_sp_sb_mms[i];
+ mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+ mc->max_buff, mc->pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
+ if (err)
+ return err;
+ err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+
+ return err;
+}
+
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
new file mode 100644
index 000000000000..617fb22b5d81
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -0,0 +1,903 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <net/switchdev.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+ memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+ attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags =
+ (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
+ (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+ (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 state)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ enum mlxsw_reg_spms_state spms_state;
+ char *spms_pl;
+ u16 vid;
+ int err;
+
+ switch (state) {
+ case BR_STATE_DISABLED: /* fall-through */
+ case BR_STATE_FORWARDING:
+ spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+ break;
+ case BR_STATE_LISTENING: /* fall-through */
+ case BR_STATE_LEARNING:
+ spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+ break;
+ case BR_STATE_BLOCKING:
+ spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+ break;
+ default:
+ BUG();
+ }
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ mlxsw_sp_port->stp_state = state;
+ return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+}
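+
+/* switchdev attribute setting is transactional: the core first calls the
+ * handler in a prepare phase, where a driver should only validate and
+ * reserve resources, and then again in a commit phase where the change is
+ * actually applied. The handlers here return early on
+ * switchdev_trans_ph_prepare() and do all the work on commit.
+ */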
+
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end, bool set,
+ bool only_uc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 range = fid_end - fid_begin + 1;
+ char *sftr_pl;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+ if (err)
+ goto buffer_out;
+
+ /* Flooding control allows one to decide whether a given port will
+ * flood unicast traffic for which there is no FDB entry.
+ */
+ if (only_uc)
+ goto buffer_out;
+
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+ mlxsw_sp_port->local_port, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+ kfree(sftr_pl);
+ return err;
+}
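+
+/* The helper above programs two flood tables via SFTR:
+ * MLXSW_SP_FLOOD_TABLE_UC for unknown-unicast traffic and, unless
+ * only_uc is set, MLXSW_SP_FLOOD_TABLE_BM, which by its naming covers
+ * broadcast/multicast flooding.
+ */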
+
+static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool set)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, last_visited_vid;
+ int err;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
+ true);
+ if (err) {
+ last_visited_vid = vid;
+ goto err_port_flood_set;
+ }
+ }
+
+ return 0;
+
+err_port_flood_set:
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+ netdev_err(dev, "Failed to configure unicast flooding\n");
+ return err;
+}
+
+static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ unsigned long brport_flags)
+{
+ unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+ bool set;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if ((uc_flood ^ brport_flags) & BR_FLOOD) {
+ set = !mlxsw_sp_port->uc_flood;
+ err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+ if (err)
+ return err;
+ }
+
+ mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
+ mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
+ mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+
+ return 0;
+}
+
+static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
+{
+ char sfdat_pl[MLXSW_REG_SFDAT_LEN];
+ int err;
+
+ mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
+ if (err)
+ return err;
+ mlxsw_sp->ageing_time = ageing_time;
+ return 0;
+}
+
+static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ unsigned long ageing_clock_t)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+}
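+
+/* The bridge layer hands over the ageing time in clock_t units; it is
+ * converted to jiffies and then to the whole seconds that
+ * mlxsw_sp_ageing_set() programs through the SFDAT register.
+ */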
+
+static int mlxsw_sp_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+ attr->u.ageing_time);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ int err;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+ if (err)
+ return err;
+
+ set_bit(fid, mlxsw_sp->active_fids);
+ return 0;
+}
+
+static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ clear_bit(fid, mlxsw_sp->active_fids);
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+ fid, fid);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+ enum mlxsw_reg_svfa_mt mt;
+
+ if (mlxsw_sp_port->nr_vfids)
+ mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ else
+ mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+ enum mlxsw_reg_svfa_mt mt;
+
+ if (!mlxsw_sp_port->nr_vfids)
+ return 0;
+
+ mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
+}
+
+static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
+ u16 vid_end)
+{
+ u16 vid;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ err = mlxsw_sp_port_add_vid(dev, 0, vid);
+ if (err)
+ goto err_port_add_vid;
+ }
+ return 0;
+
+err_port_add_vid:
+ for (vid--; vid >= vid_begin; vid--)
+ mlxsw_sp_port_kill_vid(dev, 0, vid);
+ return err;
+}
+
+static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end,
+ bool flag_untagged, bool flag_pvid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ enum mlxsw_reg_svfa_mt mt;
+ u16 vid, vid_e;
+ int err;
+
+ /* If this is invoked with BRIDGE_FLAGS_SELF and the port is not
+ * bridged, then packets ingressing through the port with the
+ * specified VIDs will be directed to the CPU.
+ */
+ if (!mlxsw_sp_port->bridged)
+ return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ if (!test_bit(vid, mlxsw_sp->active_fids)) {
+ err = mlxsw_sp_fid_create(mlxsw_sp, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create FID=%d\n",
+ vid);
+ return err;
+ }
+
+ /* When creating a FID, we set a VID to FID mapping
+ * regardless of the port's mode.
+ */
+ mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
+ true, vid, vid);
+ if (err) {
+ netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
+ vid);
+ return err;
+ }
+ }
+
+ /* Set FID mapping according to port's mode */
+ err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(dev, "Failed to map FID=%d", vid);
+ return err;
+ }
+ }
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, false);
+ if (err) {
+ netdev_err(dev, "Failed to configure flooding\n");
+ return err;
+ }
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
+ flag_untagged);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
+ vid, vid_e);
+ return err;
+ }
+ }
+
+ vid = vid_begin;
+ if (flag_pvid && mlxsw_sp_port->pvid != vid) {
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
+ vid);
+ return err;
+ }
+ mlxsw_sp_port->pvid = vid;
+ }
+
+ /* Change activity bits only if the HW operation succeeded */
+ for (vid = vid_begin; vid <= vid_end; vid++)
+ set_bit(vid, mlxsw_sp_port->active_vlans);
+
+ return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
+ mlxsw_sp_port->stp_state);
+}
+
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+ vlan->vid_begin, vlan->vid_end,
+ untagged_flag, pvid_flag);
+}
+
+static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
+ const char *mac, u16 vid, bool adding,
+ bool dynamic)
+{
+ enum mlxsw_reg_sfd_rec_policy policy;
+ enum mlxsw_reg_sfd_op op;
+ char *sfd_pl;
+ int err;
+
+ if (!vid)
+ vid = mlxsw_sp_port->pvid;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+ op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
+ MLXSW_REG_SFD_OP_WRITE_REMOVE;
+ mlxsw_reg_sfd_pack(sfd_pl, op, 0);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
+ mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_port->local_port);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
+ sfd_pl);
+ kfree(sfd_pl);
+
+ return err;
+}
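+
+/* One helper serves all FDB updates: static entries added via switchdev
+ * use the STATIC_ENTRY policy, while entries learned from SFN
+ * notifications (see below) are installed with DYNAMIC_ENTRY_INGRESS so
+ * the device may age them out.
+ */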
+
+static int
+mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+ true, false);
+}
+
+static int mlxsw_sp_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj),
+ trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
+ u16 vid_end)
+{
+ u16 vid;
+ int err;
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ err = mlxsw_sp_port_kill_vid(dev, 0, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid_begin, u16 vid_end, bool init)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, vid_e;
+ int err;
+
+ /* If this is invoked with BRIDGE_FLAGS_SELF and the port is not
+ * bridged, then prevent packets ingressing through the port with
+ * the specified VIDs from being trapped to the CPU.
+ */
+ if (!init && !mlxsw_sp_port->bridged)
+ return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+
+ for (vid = vid_begin; vid <= vid_end;
+ vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
+ false);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
+ vid, vid_e);
+ return err;
+ }
+ }
+
+ if ((mlxsw_sp_port->pvid >= vid_begin) &&
+ (mlxsw_sp_port->pvid <= vid_end)) {
+ /* Default VLAN is always 1 */
+ mlxsw_sp_port->pvid = 1;
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
+ mlxsw_sp_port->pvid);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
+ vid);
+ return err;
+ }
+ }
+
+ if (init)
+ goto out;
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+ false, false);
+ if (err) {
+ netdev_err(dev, "Failed to clear flooding\n");
+ return err;
+ }
+
+ for (vid = vid_begin; vid <= vid_end; vid++) {
+ /* Remove FID mapping in case of Virtual mode */
+ err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(dev, "Failed to unmap FID=%d", vid);
+ return err;
+ }
+ }
+
+out:
+ /* Change activity bits only if the HW operation succeeded */
+ for (vid = vid_begin; vid <= vid_end; vid++)
+ clear_bit(vid, mlxsw_sp_port->active_vlans);
+
+ return 0;
+}
+
+static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+ vlan->vid_begin, vlan->vid_end, false);
+}
+
+static int
+mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+ false, false);
+}
+
+static int mlxsw_sp_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ char *sfd_pl;
+ char mac[ETH_ALEN];
+ u16 vid;
+ u8 local_port;
+ u8 num_rec;
+ int stored_err = 0;
+ int i;
+ int err;
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
+ do {
+ mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
+ err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
+ MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+
+ /* Even in case of error, we have to run the dump to the end
+ * so the session in firmware is finished.
+ */
+ if (stored_err)
+ continue;
+
+ for (i = 0; i < num_rec; i++) {
+ switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
+ case MLXSW_REG_SFD_REC_TYPE_UNICAST:
+ mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
+ &local_port);
+ if (local_port == mlxsw_sp_port->local_port) {
+ ether_addr_copy(fdb->addr, mac);
+ fdb->ndm_state = NUD_REACHABLE;
+ fdb->vid = vid;
+ err = cb(&fdb->obj);
+ if (err)
+ stored_err = err;
+ }
+ }
+ }
+ } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
+
+out:
+ kfree(sfd_pl);
+ return stored_err ? stored_err : err;
+}
+
+static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ u16 vid;
+ int err = 0;
+
+ for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+ vlan->flags = 0;
+ if (vid == mlxsw_sp_port->pvid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+ vlan->vid_begin = vid;
+ vlan->vid_end = vid;
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static int mlxsw_sp_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
+ .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
+ .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
+ .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
+ .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
+};
+
+static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index,
+ bool adding)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ char mac[ETH_ALEN];
+ u8 local_port;
+ u16 vid;
+ int err;
+
+ mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
+ return;
+ }
+
+ err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
+ adding && mlxsw_sp_port->learning, true);
+ if (err) {
+ if (net_ratelimit())
+ netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+ return;
+ }
+
+ if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
+ struct switchdev_notifier_fdb_info info;
+ unsigned long notifier_type;
+
+ info.addr = mac;
+ info.vid = vid;
+ notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+ call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
+ &info.info);
+ }
+}
+
+static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl, int rec_index)
+{
+ switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
+ mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
+ }
+}
+
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
+ msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+}
+
+static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ char *sfn_pl;
+ u8 num_rec;
+ int i;
+ int err;
+
+ sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
+ if (!sfn_pl)
+ return;
+
+ mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+
+ do {
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ break;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+
+ } while (num_rec);
+
+ kfree(sfn_pl);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+}
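+
+/* The worker above drains the SFN register in batches, hands each
+ * learned/aged-out record to mlxsw_sp_fdb_notify_rec_process() and then
+ * reschedules itself, so FDB notifications are polled every
+ * fdb_notify.interval milliseconds.
+ */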
+
+static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
+ return err;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+ mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+ return 0;
+}
+
+static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+}
+
+static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 fid;
+
+ for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
+ mlxsw_sp_fid_destroy(mlxsw_sp, fid);
+}
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fdb_init(mlxsw_sp);
+}
+
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_fdb_fini(mlxsw_sp);
+ mlxsw_sp_fids_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Allow only untagged packets to ingress and tag them internally
+ * with VID 1.
+ */
+ mlxsw_sp_port->pvid = 1;
+ err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
+ if (err) {
+ netdev_err(dev, "Unable to init VLANs\n");
+ return err;
+ }
+
+ /* Add implicit VLAN interface in the device, so that untagged
+ * packets will be classified to the default vFID.
+ */
+ err = mlxsw_sp_port_add_vid(dev, 0, 1);
+ if (err)
+ netdev_err(dev, "Failed to configure default vFID\n");
+
+ return err;
+}
+
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
+}
+
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 000000000000..d85960cfb694
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1557 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+struct mlxsw_sx {
+ struct mlxsw_sx_port **ports;
+ struct mlxsw_core *core;
+ const struct mlxsw_bus_info *bus_info;
+ u8 hw_id[ETH_ALEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 tx_dropped;
+};
+
+struct mlxsw_sx_port {
+ struct net_device *dev;
+ struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+ struct mlxsw_sx *mlxsw_sx;
+ u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress port of the egress device.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
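+/* Taken together, the items above describe the MLXSW_TXHDR_LEN-byte Tx
+ * header that mlxsw_sx_txhdr_construct() below prepends to every
+ * transmitted skb: among other fields, the version, control/protocol
+ * type, egress TClass, swid, destination port/MID and the 0xE0E0
+ * stacking signature.
+ */
+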
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ bool is_emad = tx_info->is_emad;
+
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ /* We currently set default values for the egress tclass (QoS). */
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+ mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+ MLXSW_TXHDR_ETCLASS_5);
+ mlxsw_tx_hdr_swid_set(txhdr, 0);
+ mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+ mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+ mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+ MLXSW_TXHDR_RDQ_OTHER);
+ mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+ mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+ mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+ mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+ MLXSW_TXHDR_NOT_EMAD);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+ is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+ MLXSW_PORT_ADMIN_STATUS_DOWN);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_is_up)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char paos_pl[MLXSW_REG_PAOS_LEN];
+ u8 oper_status;
+ int err;
+
+ mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+ if (err)
+ return err;
+ oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+ return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ int max_mtu;
+ int err;
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+ if (err)
+ return err;
+ max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+ if (mtu > max_mtu)
+ return -EINVAL;
+
+ mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+ mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int
+mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+ mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+ bool *p_usable)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int err;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ return err;
+ *p_usable = !!mlxsw_reg_pmlp_width_get(pmlp_pl);
+ return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err)
+ return err;
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+ const struct mlxsw_tx_info tx_info = {
+ .local_port = mlxsw_sx_port->local_port,
+ .is_emad = false,
+ };
+ u64 len;
+ int err;
+
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+ struct sk_buff *skb_orig = skb;
+
+ skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
+ }
+ mlxsw_sx_txhdr_construct(skb, &tx_info);
+ len = skb->len;
+ /* Due to a race we might fail here because of a full queue. In that
+ * unlikely case we simply drop the packet.
+ */
+ err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+
+ if (!err) {
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+ if (err)
+ return err;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx_port_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ u32 tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* tx_dropped is u32, updated without syncp protection. */
+ tx_dropped += p->tx_dropped;
+ }
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
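+
+/* The u64_stats begin/retry pair above makes the 64-bit counter reads
+ * tear-free on 32-bit hosts; tx_dropped is read outside of it because,
+ * as noted, it is a plain u32 updated without syncp protection.
+ */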
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+ .ndo_open = mlxsw_sx_port_open,
+ .ndo_stop = mlxsw_sx_port_stop,
+ .ndo_start_xmit = mlxsw_sx_port_xmit,
+ .ndo_change_mtu = mlxsw_sx_port_change_mtu,
+ .ndo_get_stats64 = mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ mlxsw_sx->bus_info->fw_rev.major,
+ mlxsw_sx->bus_info->fw_rev.minor,
+ mlxsw_sx->bus_info->fw_rev.subminor);
+ strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+ sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+ {
+ .str = "a_frames_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+ },
+ {
+ .str = "a_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+ },
+ {
+ .str = "a_frame_check_sequence_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+ },
+ {
+ .str = "a_alignment_errors",
+ .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+ },
+ {
+ .str = "a_octets_transmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+ },
+ {
+ .str = "a_octets_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_xmitted_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+ },
+ {
+ .str = "a_multicast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+ },
+ {
+ .str = "a_broadcast_frames_received_ok",
+ .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+ },
+ {
+ .str = "a_in_range_length_errors",
+ .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+ },
+ {
+ .str = "a_out_of_range_length_field",
+ .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+ },
+ {
+ .str = "a_frame_too_long_errors",
+ .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+ },
+ {
+ .str = "a_symbol_error_during_carrier",
+ .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+ },
+ {
+ .str = "a_mac_control_frames_transmitted",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+ },
+ {
+ .str = "a_mac_control_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+ },
+ {
+ .str = "a_unsupported_opcodes_received",
+ .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_received",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+ },
+ {
+ .str = "a_pause_mac_ctrl_frames_xmitted",
+ .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+ },
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+ data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return MLXSW_SX_PORT_HW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+struct mlxsw_sx_port_link_mode {
+ u32 mask;
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+ .speed = 100,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .speed = 25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+ MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .speed = 50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .speed = 100000,
+ },
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
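+
+/* The table above translates between PTYS Ethernet proto mask bits and
+ * the legacy 32-bit ethtool link-mode bits. Rows that leave .supported
+ * and .advertised at zero (e.g. the 25G, 50G and 100G entries) presumably
+ * have no counterpart in that 32-bit mask and are used for speed
+ * reporting only.
+ */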
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return SUPPORTED_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+ return SUPPORTED_Backplane;
+ return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].supported;
+ }
+ return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+ u32 modes = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+ modes |= mlxsw_sx_port_link_mode[i].advertised;
+ }
+ return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_cmd *cmd)
+{
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ if (!carrier_ok)
+ goto out;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+ speed = mlxsw_sx_port_link_mode[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+ return PORT_FIBRE;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+ return PORT_DA;
+
+ if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+ MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+ return PORT_NONE;
+
+ return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_oper;
+ int err;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
+
+ cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+ mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+ mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+ cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+ cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+ if (speed == mlxsw_sx_port_link_mode[i].speed)
+ ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 speed;
+ u32 eth_proto_new;
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ bool is_up;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+ mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+ mlxsw_sx_to_ptys_speed(speed);
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to get proto");
+ return err;
+ }
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+ eth_proto_new = eth_proto_new & eth_proto_cap;
+ if (!eth_proto_new) {
+ netdev_err(dev, "Not supported proto admin requested");
+ return -EINVAL;
+ }
+ if (eth_proto_new == eth_proto_admin)
+ return 0;
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+ if (err) {
+ netdev_err(dev, "Failed to set proto admin");
+ return err;
+ }
+
+ err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+ if (err) {
+ netdev_err(dev, "Failed to get oper status");
+ return err;
+ }
+ if (!is_up)
+ return 0;
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+ if (err) {
+ netdev_err(dev, "Failed to set admin status");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+ .get_drvinfo = mlxsw_sx_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlxsw_sx_port_get_strings,
+ .get_ethtool_stats = mlxsw_sx_port_get_stats,
+ .get_sset_count = mlxsw_sx_port_get_sset_count,
+ .get_settings = mlxsw_sx_port_get_settings,
+ .set_settings = mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+ memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+ .switchdev_port_attr_get = mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN];
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+ return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ struct net_device *dev = mlxsw_sx_port->dev;
+ char ppad_pl[MLXSW_REG_PPAD_LEN];
+ int err;
+
+ mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+ err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+ /* The last byte of the base MAC address is guaranteed to be small
+ * enough that adding the local_port value to it does not overflow.
+ */
+ dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+ return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u16 vid, enum mlxsw_reg_spms_state state)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char *spms_pl;
+ int err;
+
+ spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+ if (!spms_pl)
+ return -ENOMEM;
+ mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+ kfree(spms_pl);
+ return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ u32 speed)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+ enum mlxsw_reg_spmlr_learn_mode mode)
+{
+ struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+ char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+ mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ struct net_device *dev;
+ bool usable;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+ if (!dev)
+ return -ENOMEM;
+ mlxsw_sx_port = netdev_priv(dev);
+ mlxsw_sx_port->dev = dev;
+ mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+ mlxsw_sx_port->local_port = local_port;
+
+ mlxsw_sx_port->pcpu_stats =
+ netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+ if (!mlxsw_sx_port->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_stats;
+ }
+
+ dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+ dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+ dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+ err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+ mlxsw_sx_port->local_port);
+ goto err_dev_addr_get;
+ }
+
+ netif_carrier_off(dev);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+ NETIF_F_VLAN_CHALLENGED;
+
+ /* Each packet needs to have a Tx header (metadata) on top of all
+ * other headers.
+ */
+ dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+ err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_module_check;
+ }
+
+ if (!usable) {
+ dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+ mlxsw_sx_port->local_port);
+ goto port_not_usable;
+ }
+
+ err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_system_port_mapping_set;
+ }
+
+ err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+ MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_speed_set;
+ }
+
+ err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mtu_set;
+ }
+
+ err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+ if (err)
+ goto err_port_admin_status_set;
+
+ err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+ MLXSW_PORT_DEFAULT_VID,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_stp_state_set;
+ }
+
+ err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+ MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+ mlxsw_sx_port->local_port);
+ goto err_port_mac_learning_mode_set;
+ }
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+ mlxsw_sx_port->local_port);
+ goto err_register_netdev;
+ }
+
+ mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+ return 0;
+
+err_register_netdev:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+ free_netdev(dev);
+ return err;
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+ if (!mlxsw_sx_port)
+ return;
+ unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ free_percpu(mlxsw_sx_port->pcpu_stats);
+ free_netdev(mlxsw_sx_port->dev);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+ int i;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+ mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mlxsw_sx->ports)
+ return -ENOMEM;
+
+ for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+ err = mlxsw_sx_port_create(mlxsw_sx, i);
+ if (err)
+ goto err_port_create;
+ }
+ return 0;
+
+err_port_create:
+ for (i--; i >= 1; i--)
+ mlxsw_sx_port_remove(mlxsw_sx, i);
+ kfree(mlxsw_sx->ports);
+ return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+ char *pude_pl, void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port;
+ enum mlxsw_reg_pude_oper_status status;
+ u8 local_port;
+
+ local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ if (!mlxsw_sx_port) {
+ dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ status = mlxsw_reg_pude_oper_status_get(pude_pl);
+ if (status == MLXSW_PORT_OPER_STATUS_UP) {
+ netdev_info(mlxsw_sx_port->dev, "link up\n");
+ netif_carrier_on(mlxsw_sx_port->dev);
+ } else {
+ netdev_info(mlxsw_sx_port->dev, "link down\n");
+ netif_carrier_off(mlxsw_sx_port->dev);
+ }
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+ .func = mlxsw_sx_pude_event_func,
+ .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sx_pude_event;
+ break;
+ }
+ err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+ if (err)
+ return err;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_event_trap_set;
+
+ return 0;
+
+err_event_trap_set:
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+ enum mlxsw_event_trap_id trap_id)
+{
+ struct mlxsw_event_listener *el;
+
+ switch (trap_id) {
+ case MLXSW_TRAP_ID_PUDE:
+ el = &mlxsw_sx_pude_event;
+ break;
+ }
+ mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sx_port)) {
+ dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ return;
+ }
+
+ skb->dev = mlxsw_sx_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ netif_receive_skb(skb);
+}
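+
+/* Reader-side counterpart (illustrative sketch, not used by the driver
+ * as-is): counters updated above are typically folded into a sum with
+ * the u64_stats retry loop, e.g. from an ndo_get_stats64 handler.
+ */
+static u64 __maybe_unused
+mlxsw_sx_port_rx_packets_sum(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+ struct mlxsw_sx_port_pcpu_stats *p;
+ u64 sum = 0;
+ u64 packets;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->rx_packets;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ sum += packets;
+ }
+ return sum;
+}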
+
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_FDB_MC,
+ },
+ /* Traps for specific L2 packet types, not trapped as FDB MC */
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_STP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LACP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_EAPOL,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LLDP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MMRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MVRP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RPVST,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_DHCP,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+ },
+ {
+ .func = mlxsw_sx_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+ },
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+ int err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ if (err)
+ goto err_rx_listener_register;
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ mlxsw_sx_rx_listener[i].trap_id);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ goto err_rx_trap_set;
+ }
+ return 0;
+
+err_rx_trap_set:
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+err_rx_listener_register:
+ for (i--; i >= 0; i--) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+ return err;
+}
+
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_sx_rx_listener[i].trap_id);
+ mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+ mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+ &mlxsw_sx_rx_listener[i],
+ mlxsw_sx);
+ }
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+ char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ char *sftr_pl;
+ int err;
+
+ /* Configure a flooding table, which includes only CPU port. */
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+ mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
+ MLXSW_PORT_CPU_PORT, true);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+ kfree(sftr_pl);
+ if (err)
+ return err;
+
+ /* Flood different packet types using the flooding table. */
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_BROADCAST,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl,
+ MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+ MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+ MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+ 0);
+ err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+ int err;
+
+ mlxsw_sx->core = mlxsw_core;
+ mlxsw_sx->bus_info = mlxsw_bus_info;
+
+ err = mlxsw_sx_hw_id_get(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+ return err;
+ }
+
+ err = mlxsw_sx_ports_create(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+ return err;
+ }
+
+ err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+ goto err_event_register;
+ }
+
+ err = mlxsw_sx_traps_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+ goto err_rx_listener_register;
+ }
+
+ err = mlxsw_sx_flood_init(mlxsw_sx);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+ goto err_flood_init;
+ }
+
+ return 0;
+
+err_flood_init:
+ mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+ mlxsw_sx_ports_remove(mlxsw_sx);
+ return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+ struct mlxsw_sx *mlxsw_sx = priv;
+
+ mlxsw_sx_traps_fini(mlxsw_sx);
+ mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+ mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+ .used_max_vepa_channels = 1,
+ .max_vepa_channels = 0,
+ .used_max_lag = 1,
+ .max_lag = 64,
+ .used_max_port_per_lag = 1,
+ .max_port_per_lag = 16,
+ .used_max_mid = 1,
+ .max_mid = 7000,
+ .used_max_pgt = 1,
+ .max_pgt = 0,
+ .used_max_system_port = 1,
+ .max_system_port = 48000,
+ .used_max_vlan_groups = 1,
+ .max_vlan_groups = 127,
+ .used_max_regions = 1,
+ .max_regions = 400,
+ .used_flood_tables = 1,
+ .max_flood_tables = 2,
+ .max_vid_flood_tables = 1,
+ .used_flood_mode = 1,
+ .flood_mode = 3,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+};
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+ .kind = MLXSW_DEVICE_KIND_SWITCHX2,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sx),
+ .init = mlxsw_sx_init,
+ .fini = mlxsw_sx_fini,
+ .txhdr_construct = mlxsw_sx_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+ return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+ mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 000000000000..53a9550be75e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+ /* Ethernet EMAD and FDB miss */
+ MLXSW_TRAP_ID_FDB_MC = 0x01,
+ MLXSW_TRAP_ID_ETHEMAD = 0x05,
+ /* L2 traps for specific packet types */
+ MLXSW_TRAP_ID_STP = 0x10,
+ MLXSW_TRAP_ID_LACP = 0x11,
+ MLXSW_TRAP_ID_EAPOL = 0x12,
+ MLXSW_TRAP_ID_LLDP = 0x13,
+ MLXSW_TRAP_ID_MMRP = 0x14,
+ MLXSW_TRAP_ID_MVRP = 0x15,
+ MLXSW_TRAP_ID_RPVST = 0x16,
+ MLXSW_TRAP_ID_DHCP = 0x19,
+ MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+ MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+ MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+ MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+ MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+ MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+ /* Port Up/Down event generated by hardware */
+ MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 000000000000..fdf94720ca62
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,81 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+#define MLXSW_TXHDR_VERSION_1 1
+
+enum {
+ MLXSW_TXHDR_ETH_CTL,
+ MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+ MLXSW_TXHDR_ETCLASS_0,
+ MLXSW_TXHDR_ETCLASS_1,
+ MLXSW_TXHDR_ETCLASS_2,
+ MLXSW_TXHDR_ETCLASS_3,
+ MLXSW_TXHDR_ETCLASS_4,
+ MLXSW_TXHDR_ETCLASS_5,
+ MLXSW_TXHDR_ETCLASS_6,
+ MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+ MLXSW_TXHDR_RDQ_OTHER,
+ MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+ MLXSW_TXHDR_NOT_EMAD,
+ MLXSW_TXHDR_EMAD,
+};
+
+enum {
+ MLXSW_TXHDR_TYPE_DATA,
+ MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
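+
+/* Usage sketch (illustrative; not part of this header): a transmit path
+ * reserves MLXSW_TXHDR_LEN bytes of headroom and builds the TX header in
+ * front of the Ethernet header, roughly:
+ *
+ * if (skb_cow_head(skb, MLXSW_TXHDR_LEN))
+ * goto err_drop;
+ * memset(skb_push(skb, MLXSW_TXHDR_LEN), 0, MLXSW_TXHDR_LEN);
+ *
+ * followed by filling the version/proto/etclass fields defined above.
+ */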
+
+#endif
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- err = dma_mapping_error(adapter->dev,
- sg_dma_address(&tx_ctl->sg));
- if (err) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+ err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 66d4ab703f45..1edc973df4c4 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1601,12 +1601,12 @@ static const struct of_device_id ks8851_match_table[] = {
{ .compatible = "micrel,ks8851" },
{ }
};
+MODULE_DEVICE_TABLE(of, ks8851_match_table);
static struct spi_driver ks8851_driver = {
.driver = {
.name = "ks8851",
.of_match_table = ks8851_match_table,
- .owner = THIS_MODULE,
.pm = &ks8851_pm_ops,
},
.probe = ks8851_probe,
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 3fd8ca6d4e7c..36a09d94b368 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -33,4 +33,13 @@ config ENC28J60_WRITEVERIFY
Enable the verify after the buffer write useful for debugging purpose.
If unsure, say N.
+config ENCX24J600
+ tristate "ENCX24J600 support"
+ depends on SPI
+ ---help---
+ Support for the Microchip ENC424J600/624J600 ethernet chip.
+
+ To compile this driver as a module, choose M here. The module will be
+ called encx24j600.
+
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 573d4292b9ea..ff78f621b59a 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index b1b5f66b8b69..86ea17e7ba7b 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1633,7 +1633,6 @@ static int enc28j60_remove(struct spi_device *spi)
static struct spi_driver enc28j60_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = enc28j60_probe,
.remove = enc28j60_remove,
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
new file mode 100644
index 000000000000..f3bb9055a292
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -0,0 +1,513 @@
+/*
+ * Register map access API - ENCX24J600 support
+ *
+ * Copyright 2015 Gridpoint
+ *
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+static inline bool is_bits_set(int value, int mask)
+{
+ return (value & mask) == mask;
+}
+
+static int encx24j600_switch_bank(struct encx24j600_context *ctx,
+ int bank)
+{
+ int ret = 0;
+
+ int bank_opcode = BANK_SELECT(bank);
+ ret = spi_write(ctx->spi, &bank_opcode, 1);
+ if (ret == 0)
+ ctx->bank = bank;
+
+ return ret;
+}
+
+static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
+ const void *buf, size_t len)
+{
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
+ { .tx_buf = buf, .len = len }, };
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(ctx->spi, &m);
+}
+
+static void regmap_lock_mutex(void *context)
+{
+ struct encx24j600_context *ctx = context;
+ mutex_lock(&ctx->mutex);
+}
+
+static void regmap_unlock_mutex(void *context)
+{
+ struct encx24j600_context *ctx = context;
+ mutex_unlock(&ctx->mutex);
+}
+
+static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
+ size_t len)
+{
+ struct encx24j600_context *ctx = context;
+ u8 banked_reg = reg & ADDR_MASK;
+ u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+ u8 cmd = RCRU;
+ int ret = 0;
+ int i = 0;
+ u8 tx_buf[2];
+
+ if (reg < 0x80) {
+ cmd = RCRCODE | banked_reg;
+ if ((banked_reg < 0x16) && (ctx->bank != bank))
+ ret = encx24j600_switch_bank(ctx, bank);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ /* Translate registers that are more efficient to access
+ * using 3-byte SPI commands
+ */
+ switch (reg) {
+ case EGPRDPT:
+ cmd = RGPRDPT; break;
+ case EGPWRPT:
+ cmd = RGPWRPT; break;
+ case ERXRDPT:
+ cmd = RRXRDPT; break;
+ case ERXWRPT:
+ cmd = RRXWRPT; break;
+ case EUDARDPT:
+ cmd = RUDARDPT; break;
+ case EUDAWRPT:
+ cmd = RUDAWRPT; break;
+ case EGPDATA:
+ case ERXDATA:
+ case EUDADATA:
+ default:
+ return -EINVAL;
+ }
+ }
+
+ tx_buf[i++] = cmd;
+ if (cmd == RCRU)
+ tx_buf[i++] = reg;
+
+ ret = spi_write_then_read(ctx->spi, tx_buf, i, val, len);
+
+ return ret;
+}
+
+static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
+ u8 reg, u8 *val, size_t len,
+ u8 unbanked_cmd, u8 banked_code)
+{
+ u8 banked_reg = reg & ADDR_MASK;
+ u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+ u8 cmd = unbanked_cmd;
+ struct spi_message m;
+ struct spi_transfer t[3] = { { .tx_buf = &cmd, .len = sizeof(cmd), },
+ { .tx_buf = &reg, .len = sizeof(reg), },
+ { .tx_buf = val, .len = len }, };
+
+ if (reg < 0x80) {
+ int ret = 0;
+ cmd = banked_code | banked_reg;
+ if ((banked_reg < 0x16) && (ctx->bank != bank))
+ ret = encx24j600_switch_bank(ctx, bank);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ /* Translate registers that are more efficient to access
+ * using 3-byte SPI commands
+ */
+ switch (reg) {
+ case EGPRDPT:
+ cmd = WGPRDPT; break;
+ case EGPWRPT:
+ cmd = WGPWRPT; break;
+ case ERXRDPT:
+ cmd = WRXRDPT; break;
+ case ERXWRPT:
+ cmd = WRXWRPT; break;
+ case EUDARDPT:
+ cmd = WUDARDPT; break;
+ case EUDAWRPT:
+ cmd = WUDAWRPT; break;
+ case EGPDATA:
+ case ERXDATA:
+ case EUDADATA:
+ default:
+ return -EINVAL;
+ }
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+
+ if (cmd == unbanked_cmd) {
+ t[1].tx_buf = &reg;
+ spi_message_add_tail(&t[1], &m);
+ }
+
+ spi_message_add_tail(&t[2], &m);
+ return spi_sync(ctx->spi, &m);
+}
+
+static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
+ size_t len)
+{
+ struct encx24j600_context *ctx = context;
+ return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
+}
+
+static int regmap_encx24j600_sfr_set_bits(struct encx24j600_context *ctx,
+ u8 reg, u8 val)
+{
+ return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFSU, BFSCODE);
+}
+
+static int regmap_encx24j600_sfr_clr_bits(struct encx24j600_context *ctx,
+ u8 reg, u8 val)
+{
+ return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFCU, BFCCODE);
+}
+
+static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg,
+ unsigned int mask,
+ unsigned int val)
+{
+ struct encx24j600_context *ctx = context;
+
+ int ret = 0;
+ unsigned int set_mask = mask & val;
+ unsigned int clr_mask = mask & ~val;
+
+ if ((reg >= 0x40 && reg < 0x6c) || reg >= 0x80)
+ return -EINVAL;
+
+ if (set_mask & 0xff)
+ ret = regmap_encx24j600_sfr_set_bits(ctx, reg, set_mask);
+
+ set_mask = (set_mask & 0xff00) >> 8;
+
+ if ((set_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_set_bits(ctx, reg + 1, set_mask);
+
+ if ((clr_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_clr_bits(ctx, reg, clr_mask);
+
+ clr_mask = (clr_mask & 0xff00) >> 8;
+
+ if ((clr_mask & 0xff) && (ret == 0))
+ ret = regmap_encx24j600_sfr_clr_bits(ctx, reg + 1, clr_mask);
+
+ return ret;
+}
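+
+/* Worked example (illustrative values): updating a 16-bit SFR with
+ * mask 0x0300 and value 0x0100 yields set_mask = 0x0100 and
+ * clr_mask = 0x0200, so the function issues a BFSU on reg + 1 with 0x01
+ * and then a BFCU on reg + 1 with 0x02; the low byte is never rewritten.
+ */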
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+ size_t count)
+{
+ struct encx24j600_context *ctx = context;
+
+ if (reg < 0xc0)
+ return encx24j600_cmdn(ctx, reg, data, count);
+ else
+ /* SPI 1-byte command. Ignore data */
+ return spi_write(ctx->spi, &reg, 1);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
+
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count)
+{
+ struct encx24j600_context *ctx = context;
+
+ if (reg == RBSEL && count > 1)
+ count = 1;
+
+ return spi_write_then_read(ctx->spi, &reg, sizeof(reg), data, count);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_read);
+
+static int regmap_encx24j600_write(void *context, const void *data,
+ size_t len)
+{
+ u8 *dout = (u8 *)data;
+ u8 reg = dout[0];
+ ++dout;
+ --len;
+
+ if (reg > 0xa0)
+ return regmap_encx24j600_spi_write(context, reg, dout, len);
+
+ if (len > 2)
+ return -EINVAL;
+
+ return regmap_encx24j600_sfr_write(context, reg, dout, len);
+}
+
+static int regmap_encx24j600_read(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val, size_t val_size)
+{
+ u8 reg = *(const u8 *)reg_buf;
+
+ if (reg_size != 1) {
+ pr_err("%s: reg=%02x reg_size=%zu\n", __func__, reg, reg_size);
+ return -EINVAL;
+ }
+
+ if (reg > 0xa0)
+ return regmap_encx24j600_spi_read(context, reg, val, val_size);
+
+ if (val_size > 2) {
+ pr_err("%s: reg=%02x val_size=%zu\n", __func__, reg, val_size);
+ return -EINVAL;
+ }
+
+ return regmap_encx24j600_sfr_read(context, reg, val, val_size);
+}
+
+static bool encx24j600_regmap_readable(struct device *dev, unsigned int reg)
+{
+ if ((reg < 0x36) ||
+ ((reg >= 0x40) && (reg < 0x4c)) ||
+ ((reg >= 0x52) && (reg < 0x56)) ||
+ ((reg >= 0x60) && (reg < 0x66)) ||
+ ((reg >= 0x68) && (reg < 0x80)) ||
+ ((reg >= 0x86) && (reg < 0x92)) ||
+ (reg == 0xc8))
+ return true;
+ else
+ return false;
+}
+
+static bool encx24j600_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ if ((reg < 0x12) ||
+ ((reg >= 0x14) && (reg < 0x1a)) ||
+ ((reg >= 0x1c) && (reg < 0x36)) ||
+ ((reg >= 0x40) && (reg < 0x4c)) ||
+ ((reg >= 0x52) && (reg < 0x56)) ||
+ ((reg >= 0x60) && (reg < 0x68)) ||
+ ((reg >= 0x6c) && (reg < 0x80)) ||
+ ((reg >= 0x86) && (reg < 0x92)) ||
+ ((reg >= 0xc0) && (reg < 0xc8)) ||
+ ((reg >= 0xca) && (reg < 0xf0)))
+ return true;
+ else
+ return false;
+}
+
+static bool encx24j600_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ERXHEAD:
+ case EDMACS:
+ case ETXSTAT:
+ case ETXWIRE:
+ case ECON1: /* Can be modified via single byte cmds */
+ case ECON2: /* Can be modified via single byte cmds */
+ case ESTAT:
+ case EIR: /* Can be modified via single byte cmds */
+ case MIRD:
+ case MISTAT:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool encx24j600_regmap_precious(struct device *dev, unsigned int reg)
+{
+ /* single byte cmds are precious */
+ if (((reg >= 0xc0) && (reg < 0xc8)) ||
+ ((reg >= 0xca) && (reg < 0xf0)))
+ return true;
+ else
+ return false;
+}
+
+static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct encx24j600_context *ctx = context;
+ int ret;
+ unsigned int mistat;
+
+ reg = MIREGADR_VAL | (reg & PHREG_MASK);
+ ret = regmap_write(ctx->regmap, MIREGADR, reg);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MICMD, MIIRD);
+ if (unlikely(ret))
+ goto err_out;
+
+ usleep_range(26, 100);
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+ (mistat & BUSY))
+ cpu_relax();
+
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MICMD, 0);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_read(ctx->regmap, MIRD, val);
+
+err_out:
+ if (ret)
+ pr_err("%s: error %d reading reg %02x\n", __func__, ret,
+ reg & PHREG_MASK);
+
+ return ret;
+}
+
+static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct encx24j600_context *ctx = context;
+ int ret;
+ unsigned int mistat;
+
+ reg = MIREGADR_VAL | (reg & PHREG_MASK);
+ ret = regmap_write(ctx->regmap, MIREGADR, reg);
+ if (unlikely(ret))
+ goto err_out;
+
+ ret = regmap_write(ctx->regmap, MIWR, val);
+ if (unlikely(ret))
+ goto err_out;
+
+ usleep_range(26, 100);
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+ (mistat & BUSY))
+ cpu_relax();
+
+err_out:
+ if (ret)
+ pr_err("%s: error %d writing reg %02x=%04x\n", __func__, ret,
+ reg & PHREG_MASK, val);
+
+ return ret;
+}
+
+static bool encx24j600_phymap_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHCON1:
+ case PHSTAT1:
+ case PHANA:
+ case PHANLPA:
+ case PHANE:
+ case PHCON2:
+ case PHSTAT2:
+ case PHSTAT3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool encx24j600_phymap_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHCON1:
+ case PHCON2:
+ case PHANA:
+ return true;
+ case PHSTAT1:
+ case PHSTAT2:
+ case PHSTAT3:
+ case PHANLPA:
+ case PHANE:
+ default:
+ return false;
+ }
+}
+
+static bool encx24j600_phymap_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PHSTAT1:
+ case PHSTAT2:
+ case PHSTAT3:
+ case PHANLPA:
+ case PHANE:
+ case PHCON2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config regcfg = {
+ .name = "reg",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0xee,
+ .reg_stride = 2,
+ .cache_type = REGCACHE_RBTREE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .readable_reg = encx24j600_regmap_readable,
+ .writeable_reg = encx24j600_regmap_writeable,
+ .volatile_reg = encx24j600_regmap_volatile,
+ .precious_reg = encx24j600_regmap_precious,
+ .lock = regmap_lock_mutex,
+ .unlock = regmap_unlock_mutex,
+};
+
+static struct regmap_bus regmap_encx24j600 = {
+ .write = regmap_encx24j600_write,
+ .read = regmap_encx24j600_read,
+ .reg_update_bits = regmap_encx24j600_reg_update_bits,
+};
+
+static struct regmap_config phycfg = {
+ .name = "phy",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0x1f,
+ .cache_type = REGCACHE_RBTREE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .readable_reg = encx24j600_phymap_readable,
+ .writeable_reg = encx24j600_phymap_writeable,
+ .volatile_reg = encx24j600_phymap_volatile,
+};
+static struct regmap_bus phymap_encx24j600 = {
+ .reg_write = regmap_encx24j600_phy_reg_write,
+ .reg_read = regmap_encx24j600_phy_reg_read,
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx)
+{
+ mutex_init(&ctx->mutex);
+ regcfg.lock_arg = ctx;
+ ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+ ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
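+
+/* Probe-time usage sketch (illustrative): the caller owns the context
+ * and should check both maps, since devm_regmap_init() can return an
+ * ERR_PTR value:
+ *
+ * ctx->spi = spi;
+ * devm_regmap_init_encx24j600(&spi->dev, ctx);
+ * if (IS_ERR(ctx->regmap) || IS_ERR(ctx->phymap))
+ * return -ENODEV;
+ */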
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
new file mode 100644
index 000000000000..2056b719c262
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -0,0 +1,1129 @@
+/*
+ * Microchip ENCX24J600 ethernet driver
+ *
+ * Copyright (C) 2015 Gridpoint
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+#define DRV_NAME "encx24j600"
+#define DRV_VERSION "1.0"
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/* SRAM memory layout:
+ *
+ * 0x0000-0x05ff TX buffers 1.5KB (1*1536) reside in the GP area in SRAM
+ * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
+ */
+#define ENC_TX_BUF_START 0x0000U
+#define ENC_RX_BUF_START 0x0600U
+#define ENC_RX_BUF_END 0x5fffU
+#define ENC_SRAM_SIZE 0x6000U
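+
+/* Layout sanity (illustrative): one 1536-byte TX buffer plus fifteen
+ * 1536-byte RX buffers exactly fill the 24KB SRAM, so checks such as
+ * these would hold at compile time:
+ *
+ * BUILD_BUG_ON(ENC_RX_BUF_START - ENC_TX_BUF_START != 1536);
+ * BUILD_BUG_ON(ENC_RX_BUF_END + 1 - ENC_RX_BUF_START != 15 * 1536);
+ * BUILD_BUG_ON(ENC_RX_BUF_END + 1 != ENC_SRAM_SIZE);
+ */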
+
+enum {
+ RXFILTER_NORMAL,
+ RXFILTER_MULTI,
+ RXFILTER_PROMISC
+};
+
+struct encx24j600_priv {
+ struct net_device *ndev;
+ struct mutex lock; /* device access lock */
+ struct encx24j600_context ctx;
+ struct sk_buff *tx_skb;
+ struct task_struct *kworker_task;
+ struct kthread_worker kworker;
+ struct kthread_work tx_work;
+ struct kthread_work setrx_work;
+ u16 next_packet;
+ bool hw_enabled;
+ bool full_duplex;
+ bool autoneg;
+ u16 speed;
+ int rxfilter;
+ u32 msg_enable;
+};
+
+static void dump_packet(const char *msg, int len, const char *data)
+{
+ pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
+ print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
+}
+
+static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
+ struct rsv *rsv)
+{
+ struct net_device *dev = priv->ndev;
+
+ netdev_info(dev, "RX packet Len:%d\n", rsv->len);
+ netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
+ rsv->next_packet);
+ netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXOK),
+ RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
+ netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
+ RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
+ RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
+ netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
+ RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
+ RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
+ netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
+}
+
+static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned int val = 0;
+ int ret = regmap_read(priv->ctx.regmap, reg, &val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
+ __func__, ret, reg);
+ return val;
+}
+
+static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.regmap, reg, val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+ __func__, ret, reg, val);
+}
+
+static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
+ u16 mask, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
+ __func__, ret, reg, val, mask);
+}
+
+static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned int val = 0;
+ int ret = regmap_read(priv->ctx.phymap, reg, &val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
+ __func__, ret, reg);
+ return val;
+}
+
+static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.phymap, reg, val);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+ __func__, ret, reg, val);
+}
+
+static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+ encx24j600_update_reg(priv, reg, mask, 0);
+}
+
+static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+ encx24j600_update_reg(priv, reg, mask, mask);
+}
+
+static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+ if (unlikely(ret))
+ netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
+ __func__, ret, cmd);
+}
+
+static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
+ size_t count)
+{
+ int ret;
+ mutex_lock(&priv->ctx.mutex);
+ ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
+ mutex_unlock(&priv->ctx.mutex);
+
+ return ret;
+}
+
+static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
+ const u8 *data, size_t count)
+{
+ int ret;
+ mutex_lock(&priv->ctx.mutex);
+ ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
+ mutex_unlock(&priv->ctx.mutex);
+
+ return ret;
+}
+
+static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
+{
+ u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+ if (priv->autoneg == AUTONEG_ENABLE) {
+ phcon1 |= ANEN | RENEG;
+ } else {
+ phcon1 &= ~ANEN;
+ if (priv->speed == SPEED_100)
+ phcon1 |= SPD100;
+ else
+ phcon1 &= ~SPD100;
+
+ if (priv->full_duplex)
+ phcon1 |= PFULDPX;
+ else
+ phcon1 &= ~PFULDPX;
+ }
+ encx24j600_write_phy(priv, PHCON1, phcon1);
+}
+
+/* Waits for autonegotiation to complete. */
+static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ unsigned long timeout = jiffies + msecs_to_jiffies(2000);
+ u16 phstat1;
+ u16 estat;
+ int ret = 0;
+
+ phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+ while ((phstat1 & ANDONE) == 0) {
+ if (time_after(jiffies, timeout)) {
+ u16 phstat3;
+
+ netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");
+
+ priv->autoneg = AUTONEG_DISABLE;
+ phstat3 = encx24j600_read_phy(priv, PHSTAT3);
+ priv->speed = (phstat3 & PHY3SPD100)
+ ? SPEED_100 : SPEED_10;
+ priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
+ encx24j600_update_phcon1(priv);
+ netif_notice(priv, drv, dev, "Using parallel detection: %s/%s",
+ priv->speed == SPEED_100 ? "100" : "10",
+ priv->full_duplex ? "Full" : "Half");
+
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+ }
+
+ estat = encx24j600_read_reg(priv, ESTAT);
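+ /* Program the back-to-back inter-packet gap to match the resolved
+ * duplex; 0x15 (full) and 0x12 (half) appear to be the values
+ * recommended by the ENCX24J600 datasheet.
+ */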
+ if (estat & PHYDPX) {
+ encx24j600_set_bits(priv, MACON2, FULDPX);
+ encx24j600_write_reg(priv, MABBIPG, 0x15);
+ } else {
+ encx24j600_clr_bits(priv, MACON2, FULDPX);
+ encx24j600_write_reg(priv, MABBIPG, 0x12);
+ /* Maximum retransmission attempts */
+ encx24j600_write_reg(priv, MACLCON, 0x370f);
+ }
+
+ return ret;
+}
+
+/* Access the PHY to determine link status */
+static void encx24j600_check_link_status(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u16 estat;
+
+ estat = encx24j600_read_reg(priv, ESTAT);
+
+ if (estat & PHYLNK) {
+ if (priv->autoneg == AUTONEG_ENABLE)
+ encx24j600_wait_for_autoneg(priv);
+
+ netif_carrier_on(dev);
+ netif_info(priv, ifup, dev, "link up\n");
+ } else {
+ netif_info(priv, ifdown, dev, "link down\n");
+
+ /* Re-enable autoneg since we won't know what we might be
+ * connected to when the link is brought back up again.
+ */
+ priv->autoneg = AUTONEG_ENABLE;
+ priv->full_duplex = true;
+ priv->speed = SPEED_100;
+ netif_carrier_off(dev);
+ }
+}
+
+static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+
+ netif_dbg(priv, intr, dev, "%s", __func__);
+ encx24j600_check_link_status(priv);
+ encx24j600_clr_bits(priv, EIR, LINKIF);
+}
+
+static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
+{
+ struct net_device *dev = priv->ndev;
+
+ if (!priv->tx_skb) {
+ BUG();
+ return;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (err)
+ dev->stats.tx_errors++;
+ else
+ dev->stats.tx_packets++;
+
+ dev->stats.tx_bytes += priv->tx_skb->len;
+
+ encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+ netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");
+
+ dev_kfree_skb(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ netif_wake_queue(dev);
+
+ mutex_unlock(&priv->lock);
+}
+
+static int encx24j600_receive_packet(struct encx24j600_priv *priv,
+ struct rsv *rsv)
+{
+ struct net_device *dev = priv->ndev;
+ struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+ if (!skb) {
+ pr_err_ratelimited("RX: OOM: packet dropped\n");
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet("RX", skb->len, skb->data);
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
+ /* Maintain stats */
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rsv->len;
+ priv->next_packet = rsv->next_packet;
+
+ netif_rx(skb);
+
+ return 0;
+}
+
+static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
+{
+ struct net_device *dev = priv->ndev;
+
+ while (packet_count--) {
+ struct rsv rsv;
+ u16 newrxtail;
+
+ encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
+ encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));
+
+ if (netif_msg_rx_status(priv))
+ encx24j600_dump_rsv(priv, __func__, &rsv);
+
+ if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
+ (rsv.len > MAX_FRAMELEN)) {
+ netif_err(priv, rx_err, dev, "RX Error %04x\n",
+ rsv.rxstat);
+ dev->stats.rx_errors++;
+
+ if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
+ dev->stats.rx_crc_errors++;
+ if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
+ dev->stats.rx_frame_errors++;
+ if (rsv.len > MAX_FRAMELEN)
+ dev->stats.rx_over_errors++;
+ } else {
+ encx24j600_receive_packet(priv, &rsv);
+ }
+
+ newrxtail = priv->next_packet - 2;
+ if (newrxtail == ENC_RX_BUF_START)
+ newrxtail = SRAM_SIZE - 2;
+
+ encx24j600_cmd(priv, SETPKTDEC);
+ encx24j600_write_reg(priv, ERXTAIL, newrxtail);
+ }
+}
+
+static irqreturn_t encx24j600_isr(int irq, void *dev_id)
+{
+ struct encx24j600_priv *priv = dev_id;
+ struct net_device *dev = priv->ndev;
+ int eir;
+
+ /* Clear interrupts */
+ encx24j600_cmd(priv, CLREIE);
+
+ eir = encx24j600_read_reg(priv, EIR);
+
+ if (eir & LINKIF)
+ encx24j600_int_link_handler(priv);
+
+ if (eir & TXIF)
+ encx24j600_tx_complete(priv, false);
+
+ if (eir & TXABTIF)
+ encx24j600_tx_complete(priv, true);
+
+ if (eir & RXABTIF) {
+ if (eir & PCFULIF) {
+ /* Packet counter is full */
+ netif_err(priv, rx_err, dev, "Packet counter full\n");
+ }
+ dev->stats.rx_dropped++;
+ encx24j600_clr_bits(priv, EIR, RXABTIF);
+ }
+
+ if (eir & PKTIF) {
+ u8 packet_count;
+
+ mutex_lock(&priv->lock);
+
+ packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+ while (packet_count) {
+ encx24j600_rx_packets(priv, packet_count);
+ packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+ }
+
+ mutex_unlock(&priv->lock);
+ }
+
+ /* Enable interrupts */
+ encx24j600_cmd(priv, SETEIE);
+
+ return IRQ_HANDLED;
+}
+
+static int encx24j600_soft_reset(struct encx24j600_priv *priv)
+{
+ int ret = 0;
+ int timeout;
+ u16 eudast;
+
+ /* Write and verify a test value to EUDAST */
+ regcache_cache_bypass(priv->ctx.regmap, true);
+ timeout = 10;
+ do {
+ encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
+ eudast = encx24j600_read_reg(priv, EUDAST);
+ usleep_range(25, 100);
+ } while ((eudast != EUDAST_TEST_VAL) && --timeout);
+ regcache_cache_bypass(priv->ctx.regmap, false);
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ /* Wait for CLKRDY to become set */
+ timeout = 10;
+ while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
+ usleep_range(25, 100);
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto err_out;
+ }
+
+ /* Issue a System Reset command */
+ encx24j600_cmd(priv, SETETHRST);
+ usleep_range(25, 100);
+
+ /* Confirm that EUDAST has 0000h after system reset */
+ if (encx24j600_read_reg(priv, EUDAST) != 0) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* Wait for PHY register and status bits to become available */
+ usleep_range(256, 1000);
+
+err_out:
+ return ret;
+}
+
+static int encx24j600_hw_reset(struct encx24j600_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = encx24j600_soft_reset(priv);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
+{
+ encx24j600_set_bits(priv, ECON2, TXRST);
+ encx24j600_clr_bits(priv, ECON2, TXRST);
+}
+
+static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
+{
+ /* Reset TX */
+ encx24j600_reset_hw_tx(priv);
+
+ /* Clear the TXIF flag if it was previously set */
+ encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+ /* Write the Tx Buffer pointer */
+ encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+}
+
+static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
+{
+ encx24j600_cmd(priv, DISABLERX);
+
+ /* Set up RX packet start address in the SRAM */
+ encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);
+
+ /* Preload the RX Data pointer to the beginning of the RX area */
+ encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);
+
+ priv->next_packet = ENC_RX_BUF_START;
+
+ /* Set up RX end address in the SRAM */
+ encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);
+
+ /* Reset the user data pointers */
+ encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
+ encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);
+
+ /* Set Max Frame length */
+ encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+}
+
+static void encx24j600_dump_config(struct encx24j600_priv *priv,
+ const char *msg)
+{
+ pr_info(DRV_NAME ": %s\n", msg);
+
+ /* CHIP configuration */
+ pr_info(DRV_NAME " ECON1: %04X\n", encx24j600_read_reg(priv, ECON1));
+ pr_info(DRV_NAME " ECON2: %04X\n", encx24j600_read_reg(priv, ECON2));
+ pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv,
+ ERXFCON));
+ pr_info(DRV_NAME " ESTAT: %04X\n", encx24j600_read_reg(priv, ESTAT));
+ pr_info(DRV_NAME " EIR: %04X\n", encx24j600_read_reg(priv, EIR));
+ pr_info(DRV_NAME " EIDLED: %04X\n", encx24j600_read_reg(priv, EIDLED));
+
+ /* MAC layer configuration */
+ pr_info(DRV_NAME " MACON1: %04X\n", encx24j600_read_reg(priv, MACON1));
+ pr_info(DRV_NAME " MACON2: %04X\n", encx24j600_read_reg(priv, MACON2));
+ pr_info(DRV_NAME " MAIPG: %04X\n", encx24j600_read_reg(priv, MAIPG));
+ pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv,
+ MACLCON));
+ pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
+ MABBIPG));
+
+ /* PHY configuration */
+ pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
+ pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
+ pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
+ pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv,
+ PHANLPA));
+ pr_info(DRV_NAME " PHANE: %04X\n", encx24j600_read_phy(priv, PHANE));
+ pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT1));
+ pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT2));
+ pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv,
+ PHSTAT3));
+}
+
+static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
+{
+ switch (priv->rxfilter) {
+ case RXFILTER_PROMISC:
+ encx24j600_set_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
+ break;
+ case RXFILTER_MULTI:
+ encx24j600_clr_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
+ break;
+ case RXFILTER_NORMAL:
+ default:
+ encx24j600_clr_bits(priv, MACON1, PASSALL);
+ encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
+ break;
+ }
+}
+
+static int encx24j600_hw_init(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ int ret = 0;
+ u16 eidled;
+ u16 macon2;
+
+ priv->hw_enabled = false;
+
+ eidled = encx24j600_read_reg(priv, EIDLED);
+ if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ netif_info(priv, drv, dev, "Silicon rev ID: 0x%02x\n",
+ (eidled & REVID_MASK) >> REVID_SHIFT);
+
+ /* PHY LEDs:
+ * LEDA: Link State + collision events
+ * LEDB: Link State + transmit/receive events
+ */
+ encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);
+
+ /* Loopback disabled */
+ encx24j600_write_reg(priv, MACON1, 0x9);
+
+ /* interpacket gap value */
+ encx24j600_write_reg(priv, MAIPG, 0x0c12);
+
+ /* Write the auto negotiation pattern */
+ encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);
+
+ encx24j600_update_phcon1(priv);
+ encx24j600_check_link_status(priv);
+
+ macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
+ if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
+ macon2 |= FULDPX;
+
+ encx24j600_set_bits(priv, MACON2, macon2);
+
+ priv->rxfilter = RXFILTER_NORMAL;
+ encx24j600_set_rxfilter_mode(priv);
+
+ /* Program the Maximum frame length */
+ encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+
+ /* Init Tx pointers */
+ encx24j600_hw_init_tx(priv);
+
+ /* Init Rx pointers */
+ encx24j600_hw_init_rx(priv);
+
+ if (netif_msg_hw(priv))
+ encx24j600_dump_config(priv, "Hw is initialized");
+
+err_out:
+ return ret;
+}
+
+static void encx24j600_hw_enable(struct encx24j600_priv *priv)
+{
+ /* Clear the interrupt flags in case any were set */
+ encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
+ PKTIF | LINKIF));
+
+ /* Enable the interrupts */
+ encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
+ PKTIE | LINKIE | INTIE));
+
+ /* Enable RX */
+ encx24j600_cmd(priv, ENABLERX);
+
+ priv->hw_enabled = true;
+}
+
+static void encx24j600_hw_disable(struct encx24j600_priv *priv)
+{
+ /* Disable all interrupts */
+ encx24j600_write_reg(priv, EIE, 0);
+
+ /* Disable RX */
+ encx24j600_cmd(priv, DISABLERX);
+
+ priv->hw_enabled = false;
+}
+
+static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
+ u8 duplex)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!priv->hw_enabled) {
+ /* link is in low power mode now; duplex setting
+ * will take effect on next encx24j600_hw_init()
+ */
+ if (speed == SPEED_10 || speed == SPEED_100) {
+ priv->autoneg = (autoneg == AUTONEG_ENABLE);
+ priv->full_duplex = (duplex == DUPLEX_FULL);
+ priv->speed = speed;
+ } else {
+ netif_warn(priv, link, dev, "unsupported link speed setting\n");
+ /* speeds other than SPEED_10 and SPEED_100 are not supported by the chip */
+ ret = -EOPNOTSUPP;
+ }
+ } else {
+ netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
+ unsigned char *ethaddr)
+{
+ unsigned short val;
+
+ val = encx24j600_read_reg(priv, MAADR1);
+
+ ethaddr[0] = val & 0x00ff;
+ ethaddr[1] = (val & 0xff00) >> 8;
+
+ val = encx24j600_read_reg(priv, MAADR2);
+
+ ethaddr[2] = val & 0x00ffU;
+ ethaddr[3] = (val & 0xff00U) >> 8;
+
+ val = encx24j600_read_reg(priv, MAADR3);
+
+ ethaddr[4] = val & 0x00ffU;
+ ethaddr[5] = (val & 0xff00U) >> 8;
+}
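+
+/* Worked example (illustrative address): for 00:04:a3:12:34:56 the chip
+ * returns MAADR1 = 0x0400, MAADR2 = 0x12a3 and MAADR3 = 0x5634, i.e.
+ * each 16-bit register carries two address bytes with the lower-numbered
+ * byte in the low half.
+ */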
+
+/* Program the hardware MAC address from dev->dev_addr. */
+static int encx24j600_set_hw_macaddr(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ if (priv->hw_enabled) {
+ netif_info(priv, drv, dev, "Hardware must be disabled to set Mac address\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&priv->lock);
+
+ netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
+ dev->name, dev->dev_addr);
+
+ encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
+ dev->dev_addr[5] << 8));
+ encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
+ dev->dev_addr[3] << 8));
+ encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
+ dev->dev_addr[1] << 8));
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* Store the new hardware address in dev->dev_addr, and update the MAC. */
+static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ return encx24j600_set_hw_macaddr(dev);
+}
+
+static int encx24j600_open(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (unlikely(ret < 0)) {
+ netdev_err(dev, "request irq %d failed (ret = %d)\n",
+ priv->ctx.spi->irq, ret);
+ return ret;
+ }
+
+ encx24j600_hw_disable(priv);
+ encx24j600_hw_init(priv);
+ encx24j600_hw_enable(priv);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int encx24j600_stop(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ free_irq(priv->ctx.spi->irq, priv);
+ return 0;
+}
+
+static void encx24j600_setrx_proc(struct kthread_work *ws)
+{
+ struct encx24j600_priv *priv =
+ container_of(ws, struct encx24j600_priv, setrx_work);
+
+ mutex_lock(&priv->lock);
+ encx24j600_set_rxfilter_mode(priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_set_multicast_list(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ int oldfilter = priv->rxfilter;
+
+ if (dev->flags & IFF_PROMISC) {
+ netif_dbg(priv, link, dev, "promiscuous mode\n");
+ priv->rxfilter = RXFILTER_PROMISC;
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+ netif_dbg(priv, link, dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ priv->rxfilter = RXFILTER_MULTI;
+ } else {
+ netif_dbg(priv, link, dev, "normal mode\n");
+ priv->rxfilter = RXFILTER_NORMAL;
+ }
+
+ if (oldfilter != priv->rxfilter)
+ queue_kthread_work(&priv->kworker, &priv->setrx_work);
+}
+
+static void encx24j600_hw_tx(struct encx24j600_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
+ priv->tx_skb->len);
+
+ if (netif_msg_pktdata(priv))
+ dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
+
+ if (encx24j600_read_reg(priv, EIR) & TXABTIF)
+ /* Last transmission aborted due to an error. Reset the TX interface */
+ encx24j600_reset_hw_tx(priv);
+
+ /* Clear the TXIF flag if it was previously set */
+ encx24j600_clr_bits(priv, EIR, TXIF);
+
+ /* Set the data pointer to the TX buffer address in the SRAM */
+ encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+
+ /* Copy the packet into the SRAM */
+ encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
+ priv->tx_skb->len);
+
+ /* Program the Tx buffer start pointer */
+ encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);
+
+ /* Program the packet length */
+ encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);
+
+ /* Start the transmission */
+ encx24j600_cmd(priv, SETTXRTS);
+}
+
+static void encx24j600_tx_proc(struct kthread_work *ws)
+{
+ struct encx24j600_priv *priv =
+ container_of(ws, struct encx24j600_priv, tx_work);
+
+ mutex_lock(&priv->lock);
+ encx24j600_hw_tx(priv);
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* save the timestamp */
+ dev->trans_start = jiffies;
+
+ /* Remember the skb for deferred processing */
+ priv->tx_skb = skb;
+
+ queue_kthread_work(&priv->kworker, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* Deal with a transmit timeout */
+static void encx24j600_tx_timeout(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
+ jiffies, jiffies - dev->trans_start);
+
+ dev->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static int encx24j600_get_regs_len(struct net_device *dev)
+{
+ return SFR_REG_COUNT;
+}
+
+static void encx24j600_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ u16 *buff = p;
+ u8 reg;
+
+ regs->version = 1;
+ mutex_lock(&priv->lock);
+ for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
+ unsigned int val = 0;
+ /* ignore errors for unreadable registers */
+ regmap_read(priv->ctx.regmap, reg, &val);
+ buff[reg / 2] = val & 0xffff;
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static int encx24j600_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP;
+
+ ethtool_cmd_speed_set(cmd, priv->speed);
+ cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ cmd->port = PORT_TP;
+ cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int encx24j600_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ return encx24j600_setlink(dev, cmd->autoneg,
+ ethtool_cmd_speed(cmd), cmd->duplex);
+}
+
+static u32 encx24j600_get_msglevel(struct net_device *dev)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
+{
+ struct encx24j600_priv *priv = netdev_priv(dev);
+ priv->msg_enable = val;
+}
+
+static const struct ethtool_ops encx24j600_ethtool_ops = {
+ .get_settings = encx24j600_get_settings,
+ .set_settings = encx24j600_set_settings,
+ .get_drvinfo = encx24j600_get_drvinfo,
+ .get_msglevel = encx24j600_get_msglevel,
+ .set_msglevel = encx24j600_set_msglevel,
+ .get_regs_len = encx24j600_get_regs_len,
+ .get_regs = encx24j600_get_regs,
+};
+
+static const struct net_device_ops encx24j600_netdev_ops = {
+ .ndo_open = encx24j600_open,
+ .ndo_stop = encx24j600_stop,
+ .ndo_start_xmit = encx24j600_tx,
+ .ndo_set_rx_mode = encx24j600_set_multicast_list,
+ .ndo_set_mac_address = encx24j600_set_mac_address,
+ .ndo_tx_timeout = encx24j600_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int encx24j600_spi_probe(struct spi_device *spi)
+{
+ int ret;
+
+ struct net_device *ndev;
+ struct encx24j600_priv *priv;
+
+ ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
+
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+
+ priv = netdev_priv(ndev);
+ spi_set_drvdata(spi, priv);
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ priv->ndev = ndev;
+
+ /* Default PHY configuration */
+ priv->full_duplex = true;
+ priv->autoneg = AUTONEG_ENABLE;
+ priv->speed = SPEED_100;
+
+ priv->ctx.spi = spi;
+ devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+ ndev->irq = spi->irq;
+ ndev->netdev_ops = &encx24j600_netdev_ops;
+
+ mutex_init(&priv->lock);
+
+ /* Reset device and check if it is connected */
+ if (encx24j600_hw_reset(priv)) {
+ netif_err(priv, probe, ndev,
+ DRV_NAME ": Chip is not detected\n");
+ ret = -EIO;
+ goto out_free;
+ }
+
+ /* Initialize the device HW to a consistent state */
+ if (encx24j600_hw_init(priv)) {
+ netif_err(priv, probe, ndev,
+ DRV_NAME ": HW initialization error\n");
+ ret = -EIO;
+ goto out_free;
+ }
+
+ init_kthread_worker(&priv->kworker);
+ init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
+ init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+
+ priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
+ "encx24j600");
+
+ if (IS_ERR(priv->kworker_task)) {
+ ret = PTR_ERR(priv->kworker_task);
+ goto out_free;
+ }
+
+ /* Get the MAC address from the chip */
+ encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+
+ ndev->ethtool_ops = &encx24j600_ethtool_ops;
+
+ ret = register_netdev(ndev);
+ if (unlikely(ret)) {
+ netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
+ ret);
+ goto out_free;
+ }
+
+ netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return ret;
+
+out_free:
+ free_netdev(ndev);
+
+error_out:
+ return ret;
+}
+
+static int encx24j600_spi_remove(struct spi_device *spi)
+{
+ struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
+
+ unregister_netdev(priv->ndev);
+
+ free_netdev(priv->ndev);
+
+ return 0;
+}
+
+static const struct spi_device_id encx24j600_spi_id_table[] = {
+ { .name = "encx24j600" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);
+
+static struct spi_driver encx24j600_spi_net_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = encx24j600_spi_probe,
+ .remove = encx24j600_spi_remove,
+ .id_table = encx24j600_spi_id_table,
+};
+
+static int __init encx24j600_init(void)
+{
+ return spi_register_driver(&encx24j600_spi_net_driver);
+}
+module_init(encx24j600_init);
+
+static void __exit encx24j600_exit(void)
+{
+ spi_unregister_driver(&encx24j600_spi_net_driver);
+}
+module_exit(encx24j600_exit);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
new file mode 100644
index 000000000000..4be73d5553f8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -0,0 +1,437 @@
+/* encx24j600_hw.h: Register definitions */
+
+#ifndef _ENCX24J600_HW_H
+#define _ENCX24J600_HW_H
+
+struct encx24j600_context {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regmap *phymap;
+ struct mutex mutex; /* mutex to protect access to regmap */
+ int bank;
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx);
+
+/* Single-byte instructions */
+#define BANK_SELECT(bank) (0xC0 | (((bank) & (BANK_MASK >> BANK_SHIFT)) << 1))
+#define B0SEL 0xC0 /* Bank 0 Select */
+#define B1SEL 0xC2 /* Bank 1 Select */
+#define B2SEL 0xC4 /* Bank 2 Select */
+#define B3SEL 0xC6 /* Bank 3 Select */
+#define SETETHRST 0xCA /* System Reset */
+#define FCDISABLE 0xE0 /* Flow Control Disable */
+#define FCSINGLE 0xE2 /* Flow Control Single */
+#define FCMULTIPLE 0xE4 /* Flow Control Multiple */
+#define FCCLEAR 0xE6 /* Flow Control Clear */
+#define SETPKTDEC 0xCC /* Decrement Packet Counter */
+#define DMASTOP 0xD2 /* DMA Stop */
+#define DMACKSUM 0xD8 /* DMA Start Checksum */
+#define DMACKSUMS 0xDA /* DMA Start Checksum with Seed */
+#define DMACOPY 0xDC /* DMA Start Copy */
+#define DMACOPYS 0xDE /* DMA Start Copy and Checksum with Seed */
+#define SETTXRTS 0xD4 /* Request Packet Transmission */
+#define ENABLERX 0xE8 /* Enable RX */
+#define DISABLERX 0xEA /* Disable RX */
+#define SETEIE 0xEC /* Enable Interrupts */
+#define CLREIE 0xEE /* Disable Interrupts */
+
+/* Two byte instructions */
+#define RBSEL 0xC8 /* Read Bank Select */
+
+/* Three byte instructions */
+#define WGPRDPT 0x60 /* Write EGPRDPT */
+#define RGPRDPT 0x62 /* Read EGPRDPT */
+#define WRXRDPT 0x64 /* Write ERXRDPT */
+#define RRXRDPT 0x66 /* Read ERXRDPT */
+#define WUDARDPT 0x68 /* Write EUDARDPT */
+#define RUDARDPT 0x6A /* Read EUDARDPT */
+#define WGPWRPT 0x6C /* Write EGPWRPT */
+#define RGPWRPT 0x6E /* Read EGPWRPT */
+#define WRXWRPT 0x70 /* Write ERXWRPT */
+#define RRXWRPT 0x72 /* Read ERXWRPT */
+#define WUDAWRPT 0x74 /* Write EUDAWRPT */
+#define RUDAWRPT 0x76 /* Read EUDAWRPT */
+
+/* n byte instructions */
+#define RCRCODE 0x00
+#define WCRCODE 0x40
+#define BFSCODE 0x80
+#define BFCCODE 0xA0
+#define RCR(addr) (RCRCODE | ((addr) & ADDR_MASK)) /* Read Control Register */
+#define WCR(addr) (WCRCODE | ((addr) & ADDR_MASK)) /* Write Control Register */
+#define RCRU 0x20 /* Read Control Register Unbanked */
+#define WCRU 0x22 /* Write Control Register Unbanked */
+#define BFS(addr) (BFSCODE | ((addr) & ADDR_MASK)) /* Bit Field Set */
+#define BFC(addr) (BFCCODE | ((addr) & ADDR_MASK)) /* Bit Field Clear */
+#define BFSU 0x24 /* Bit Field Set Unbanked */
+#define BFCU 0x26 /* Bit Field Clear Unbanked */
+#define RGPDATA 0x28 /* Read EGPDATA */
+#define WGPDATA 0x2A /* Write EGPDATA */
+#define RRXDATA 0x2C /* Read ERXDATA */
+#define WRXDATA 0x2E /* Write ERXDATA */
+#define RUDADATA 0x30 /* Read EUDADATA */
+#define WUDADATA 0x32 /* Write EUDADATA */
+
+#define SFR_REG_COUNT 0xA0
+
+/* ENC424J600 Control Registers
+ * Control register definitions are a combination of address
+ * and bank number
+ * - Register address (bits 0-4)
+ * - Bank number (bits 5-6)
+ */
+#define ADDR_MASK 0x1F
+#define BANK_MASK 0x60
+#define BANK_SHIFT 5
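+
+/* Editorial sketch (not part of the driver): splitting a banked register
+ * constant with the masks above, e.g. ERXFCON (0x14 | 0x20):
+ *
+ *	u8 bank = (ERXFCON & BANK_MASK) >> BANK_SHIFT;	== 1
+ *	u8 addr = ERXFCON & ADDR_MASK;			== 0x14
+ *
+ * Likewise BANK_SELECT(1) evaluates to 0xC2, i.e. the B1SEL opcode.
+ */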
+
+/* All-bank registers */
+#define EUDAST 0x16
+#define EUDAND 0x18
+#define ESTAT 0x1A
+#define EIR 0x1C
+#define ECON1 0x1E
+
+/* Bank 0 registers */
+#define ETXST (0x00 | 0x00)
+#define ETXLEN (0x02 | 0x00)
+#define ERXST (0x04 | 0x00)
+#define ERXTAIL (0x06 | 0x00)
+#define ERXHEAD (0x08 | 0x00)
+#define EDMAST (0x0A | 0x00)
+#define EDMALEN (0x0C | 0x00)
+#define EDMADST (0x0E | 0x00)
+#define EDMACS (0x10 | 0x00)
+#define ETXSTAT (0x12 | 0x00)
+#define ETXWIRE (0x14 | 0x00)
+
+/* Bank 1 registers */
+#define EHT1 (0x00 | 0x20)
+#define EHT2 (0x02 | 0x20)
+#define EHT3 (0x04 | 0x20)
+#define EHT4 (0x06 | 0x20)
+#define EPMM1 (0x08 | 0x20)
+#define EPMM2 (0x0A | 0x20)
+#define EPMM3 (0x0C | 0x20)
+#define EPMM4 (0x0E | 0x20)
+#define EPMCS (0x10 | 0x20)
+#define EPMO (0x12 | 0x20)
+#define ERXFCON (0x14 | 0x20)
+
+/* Bank 2 registers */
+#define MACON1 (0x00 | 0x40)
+#define MACON2 (0x02 | 0x40)
+#define MABBIPG (0x04 | 0x40)
+#define MAIPG (0x06 | 0x40)
+#define MACLCON (0x08 | 0x40)
+#define MAMXFL (0x0A | 0x40)
+#define MICMD (0x12 | 0x40)
+#define MIREGADR (0x14 | 0x40)
+
+/* Bank 3 registers */
+#define MAADR3 (0x00 | 0x60)
+#define MAADR2 (0x02 | 0x60)
+#define MAADR1 (0x04 | 0x60)
+#define MIWR (0x06 | 0x60)
+#define MIRD (0x08 | 0x60)
+#define MISTAT (0x0A | 0x60)
+#define EPAUS (0x0C | 0x60)
+#define ECON2 (0x0E | 0x60)
+#define ERXWM (0x10 | 0x60)
+#define EIE (0x12 | 0x60)
+#define EIDLED (0x14 | 0x60)
+
+/* Unbanked registers */
+#define EGPDATA (0x00 | 0x80)
+#define ERXDATA (0x02 | 0x80)
+#define EUDADATA (0x04 | 0x80)
+#define EGPRDPT (0x06 | 0x80)
+#define EGPWRPT (0x08 | 0x80)
+#define ERXRDPT (0x0A | 0x80)
+#define ERXWRPT (0x0C | 0x80)
+#define EUDARDPT (0x0E | 0x80)
+#define EUDAWRPT (0x10 | 0x80)
+
+/* Register bit definitions */
+/* ESTAT */
+#define INT (1 << 15)
+#define FCIDLE (1 << 14)
+#define RXBUSY (1 << 13)
+#define CLKRDY (1 << 12)
+#define PHYDPX (1 << 10)
+#define PHYLNK (1 << 8)
+
+/* EIR */
+#define CRYPTEN (1 << 15)
+#define MODEXIF (1 << 14)
+#define HASHIF (1 << 13)
+#define AESIF (1 << 12)
+#define LINKIF (1 << 11)
+#define PKTIF (1 << 6)
+#define DMAIF (1 << 5)
+#define TXIF (1 << 3)
+#define TXABTIF (1 << 2)
+#define RXABTIF (1 << 1)
+#define PCFULIF (1 << 0)
+
+/* ECON1 */
+#define MODEXST (1 << 15)
+#define HASHEN (1 << 14)
+#define HASHOP (1 << 13)
+#define HASHLST (1 << 12)
+#define AESST (1 << 11)
+#define AESOP1 (1 << 10)
+#define AESOP0 (1 << 9)
+#define PKTDEC (1 << 8)
+#define FCOP1 (1 << 7)
+#define FCOP0 (1 << 6)
+#define DMAST (1 << 5)
+#define DMACPY (1 << 4)
+#define DMACSSD (1 << 3)
+#define DMANOCS (1 << 2)
+#define TXRTS (1 << 1)
+#define RXEN (1 << 0)
+
+/* ETXSTAT */
+#define LATECOL (1 << 10)
+#define MAXCOL (1 << 9)
+#define EXDEFER (1 << 8)
+#define ETXSTATL_DEFER (1 << 7)
+#define CRCBAD (1 << 4)
+#define COLCNT_MASK 0xF
+
+/* ERXFCON */
+#define HTEN (1 << 15)
+#define MPEN (1 << 14)
+#define NOTPM (1 << 12)
+#define PMEN3 (1 << 11)
+#define PMEN2 (1 << 10)
+#define PMEN1 (1 << 9)
+#define PMEN0 (1 << 8)
+#define CRCEEN (1 << 7)
+#define CRCEN (1 << 6)
+#define RUNTEEN (1 << 5)
+#define RUNTEN (1 << 4)
+#define UCEN (1 << 3)
+#define NOTMEEN (1 << 2)
+#define MCEN (1 << 1)
+#define BCEN (1 << 0)
+
+/* MACON1 */
+#define LOOPBK (1 << 4)
+#define RXPAUS (1 << 2)
+#define PASSALL (1 << 1)
+
+/* MACON2 */
+#define MACON2_DEFER (1 << 14)
+#define BPEN (1 << 13)
+#define NOBKOFF (1 << 12)
+#define PADCFG2 (1 << 7)
+#define PADCFG1 (1 << 6)
+#define PADCFG0 (1 << 5)
+#define TXCRCEN (1 << 4)
+#define PHDREN (1 << 3)
+#define HFRMEN (1 << 2)
+#define MACON2_RSV1 (1 << 1)
+#define FULDPX (1 << 0)
+
+/* MAIPG */
+/* The value of the high byte is given by the reserved bits;
+ * the value of the low byte is the recommended setting of the
+ * IPG parameter.
+ */
+#define MAIPGH_VAL 0x0C
+#define MAIPGL_VAL 0x12
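+
+/* Editorial sketch, assuming MAIPG is written as a single 16-bit register:
+ * the programmed value would be (MAIPGH_VAL << 8) | MAIPGL_VAL == 0x0C12.
+ */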
+
+/* MIREGADRH */
+#define MIREGADR_VAL (1 << 8)
+
+/* MIREGADRL */
+#define PHREG_MASK 0x1F
+
+/* MICMD */
+#define MIISCAN (1 << 1)
+#define MIIRD (1 << 0)
+
+/* MISTAT */
+#define NVALID (1 << 2)
+#define SCAN (1 << 1)
+#define BUSY (1 << 0)
+
+/* ECON2 */
+#define ETHEN (1 << 15)
+#define STRCH (1 << 14)
+#define TXMAC (1 << 13)
+#define SHA1MD5 (1 << 12)
+#define COCON3 (1 << 11)
+#define COCON2 (1 << 10)
+#define COCON1 (1 << 9)
+#define COCON0 (1 << 8)
+#define AUTOFC (1 << 7)
+#define TXRST (1 << 6)
+#define RXRST (1 << 5)
+#define ETHRST (1 << 4)
+#define MODLEN1 (1 << 3)
+#define MODLEN0 (1 << 2)
+#define AESLEN1 (1 << 1)
+#define AESLEN0 (1 << 0)
+
+/* EIE */
+#define INTIE (1 << 15)
+#define MODEXIE (1 << 14)
+#define HASHIE (1 << 13)
+#define AESIE (1 << 12)
+#define LINKIE (1 << 11)
+#define PKTIE (1 << 6)
+#define DMAIE (1 << 5)
+#define TXIE (1 << 3)
+#define TXABTIE (1 << 2)
+#define RXABTIE (1 << 1)
+#define PCFULIE (1 << 0)
+
+/* EIDLED */
+#define LACFG3 (1 << 15)
+#define LACFG2 (1 << 14)
+#define LACFG1 (1 << 13)
+#define LACFG0 (1 << 12)
+#define LBCFG3 (1 << 11)
+#define LBCFG2 (1 << 10)
+#define LBCFG1 (1 << 9)
+#define LBCFG0 (1 << 8)
+#define DEVID_SHIFT 5
+#define DEVID_MASK (0x7 << DEVID_SHIFT)
+#define REVID_SHIFT 0
+#define REVID_MASK (0x1F << REVID_SHIFT)
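+
+/* Editorial sketch: the device and silicon revision IDs are extracted as
+ *	devid = (eidled & DEVID_MASK) >> DEVID_SHIFT;
+ *	revid = (eidled & REVID_MASK) >> REVID_SHIFT;
+ * where devid is expected to read back as ENCX24J600_DEV_ID (0x1, below).
+ */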
+
+/* PHY registers */
+#define PHCON1 0x00
+#define PHSTAT1 0x01
+#define PHANA 0x04
+#define PHANLPA 0x05
+#define PHANE 0x06
+#define PHCON2 0x11
+#define PHSTAT2 0x1B
+#define PHSTAT3 0x1F
+
+/* PHCON1 */
+#define PRST (1 << 15)
+#define PLOOPBK (1 << 14)
+#define SPD100 (1 << 13)
+#define ANEN (1 << 12)
+#define PSLEEP (1 << 11)
+#define RENEG (1 << 9)
+#define PFULDPX (1 << 8)
+
+/* PHSTAT1 */
+#define FULL100 (1 << 14)
+#define HALF100 (1 << 13)
+#define FULL10 (1 << 12)
+#define HALF10 (1 << 11)
+#define ANDONE (1 << 5)
+#define LRFAULT (1 << 4)
+#define ANABLE (1 << 3)
+#define LLSTAT (1 << 2)
+#define EXTREGS (1 << 0)
+
+/* PHSTAT2 */
+#define PLRITY (1 << 4)
+
+/* PHSTAT3 */
+#define PHY3SPD100 (1 << 3)
+#define PHY3DPX (1 << 4)
+#define SPDDPX_SHIFT 2
+#define SPDDPX_MASK (0x7 << SPDDPX_SHIFT)
+
+/* PHANA */
+/* Default value for PHY initialization */
+#define PHANA_DEFAULT 0x05E1
+
+/* PHANE */
+#define PDFLT (1 << 4)
+#define LPARCD (1 << 1)
+#define LPANABL (1 << 0)
+
+#define EUDAST_TEST_VAL 0x1234
+
+#define TSV_SIZE 7
+
+#define ENCX24J600_DEV_ID 0x1
+
+/* Configuration */
+
+/* LED is on when the link is present, and is driven low
+ * briefly when a packet is transmitted or received
+ */
+#define LED_A_SETTINGS 0xC
+
+/* LED is on if the link is operating in 100 Mbps mode */
+#define LED_B_SETTINGS 0x8
+
+/* Maximum Ethernet frame length.
+ * Currently not used as a limit anywhere, since we rely on
+ * the "huge frame enable" feature of the ENC424J600.
+ */
+#define MAX_FRAMELEN 1518
+
+/* Size in bytes of the receive buffer in enc424j600.
+ * Must be word aligned (even).
+ */
+#define RX_BUFFER_SIZE (15 * MAX_FRAMELEN)
+
+/* Start of the general purpose area in sram */
+#define SRAM_GP_START 0x0
+
+/* SRAM size */
+#define SRAM_SIZE 0x6000
+
+/* Start of the receive buffer */
+#define ERXST_VAL (SRAM_SIZE - RX_BUFFER_SIZE)
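+
+/* Worked numbers (editorial): RX_BUFFER_SIZE = 15 * 1518 = 22770 bytes,
+ * which is even as required, so ERXST_VAL = 0x6000 - 22770 = 0x070E.
+ */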
+
+#define RSV_RXLONGEVDROPEV 16
+#define RSV_CARRIEREV 18
+#define RSV_CRCERROR 20
+#define RSV_LENCHECKERR 21
+#define RSV_LENOUTOFRANGE 22
+#define RSV_RXOK 23
+#define RSV_RXMULTICAST 24
+#define RSV_RXBROADCAST 25
+#define RSV_DRIBBLENIBBLE 26
+#define RSV_RXCONTROLFRAME 27
+#define RSV_RXPAUSEFRAME 28
+#define RSV_RXUNKNOWNOPCODE 29
+#define RSV_RXTYPEVLAN 30
+
+#define RSV_RUNTFILTERMATCH 31
+#define RSV_NOTMEFILTERMATCH 32
+#define RSV_HASHFILTERMATCH 33
+#define RSV_MAGICPKTFILTERMATCH 34
+#define RSV_PTRNMTCHFILTERMATCH 35
+#define RSV_UNICASTFILTERMATCH 36
+
+#define RSV_SIZE 8
+#define RSV_BITMASK(x) (1 << ((x) - 16))
+#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0)
+
+struct rsv {
+ u16 next_packet;
+ u16 len;
+ u32 rxstat;
+};
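+
+/* Editorial sketch of consuming the receive status vector: after reading a
+ * struct rsv from the RX buffer, status bits are tested with the helpers
+ * above, e.g.
+ *
+ *	if (RSV_GETBIT(rsv.rxstat, RSV_RXOK))
+ *		... frame was received without errors ...
+ */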
+
+/* Put RX buffer at 0 as suggested by the Errata datasheet */
+
+#define RXSTART_INIT ERXST_VAL
+#define RXEND_INIT 0x5FFF
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+ size_t count);
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count);
+
+#endif
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index becbb5f1f5a7..a10c928bbd6b 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = {
{ .compatible = "moxa,moxart-mac" },
{ }
};
+MODULE_DEVICE_TABLE(of, moxart_mac_match);
static struct platform_driver moxart_mac_driver = {
.probe = moxart_mac_probe,
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c28111749e1f..9ba975853ec6 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5389,8 +5389,6 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
strlcpy(info->version, s2io_driver_version, sizeof(info->version));
strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
- info->regdump_len = XENA_REG_SPACE;
- info->eedump_len = XENA_EEPROM_SPACE;
}
/**
@@ -8226,31 +8224,7 @@ static void s2io_rem_nic(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/**
- * s2io_starter - Entry point for the driver
- * Description: This function is the entry point for the driver. It verifies
- * the module loadable parameters and initializes PCI configuration space.
- */
-
-static int __init s2io_starter(void)
-{
- return pci_register_driver(&s2io_driver);
-}
-
-/**
- * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It
- * unregisters the driver.
- */
-
-static __exit void s2io_closer(void)
-{
- pci_unregister_driver(&s2io_driver);
- DBG_PRINT(INIT_DBG, "cleanup done\n");
-}
-
-module_init(s2io_starter);
-module_exit(s2io_closer);
+module_pci_driver(s2io_driver);
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
struct tcphdr **tcp, struct RxD_t *rxdp,
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d89b6ed82c51..6c5997dc8afc 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
-static int s2io_starter(void);
-static void s2io_closer(void);
static void s2io_tx_watchdog(struct net_device *dev);
static void s2io_set_multicast(struct net_device *dev);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index be916eb2f2e7..9a2967016c18 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -105,10 +105,6 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
- info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
- * vdev->no_of_vpath;
-
- info->n_stats = STAT_LEN;
}
/**
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index a41bb5e6b954..75e88f4c1531 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4076,6 +4076,8 @@ static void nv_do_nic_poll(unsigned long data)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
+ unsigned long flags;
+ unsigned int irq = 0;
/*
* First disable irq(s) and then
@@ -4085,25 +4087,27 @@ static void nv_do_nic_poll(unsigned long data)
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
else
- disable_irq_lockdep(np->pci_dev->irq);
+ irq = np->pci_dev->irq;
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
mask |= NVREG_IRQ_RX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
mask |= NVREG_IRQ_TX_ALL;
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
- disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
mask |= NVREG_IRQ_OTHER;
}
}
- /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
+
+ disable_irq_nosync_lockdep_irqsave(irq, &flags);
+ synchronize_irq(irq);
if (np->recover_error) {
np->recover_error = 0;
@@ -4156,28 +4160,22 @@ static void nv_do_nic_poll(unsigned long data)
nv_nic_irq_optimized(0, dev);
else
nv_nic_irq(0, dev);
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq_lockdep(np->pci_dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
nv_nic_irq_rx(0, dev);
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
nv_nic_irq_tx(0, dev);
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
}
if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
nv_nic_irq_other(0, dev);
- enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
+ enable_irq_lockdep_irqrestore(irq, &flags);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 66fd868152e5..b159ef8303cc 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
mac[5] = tmp >> 8;
}
-static void __lpc_eth_clock_enable(struct netdata_local *pldat,
- bool enable)
+static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
{
if (enable)
- clk_enable(pldat->clk);
+ clk_prepare_enable(pldat->clk);
else
- clk_disable(pldat->clk);
+ clk_disable_unprepare(pldat->clk);
}
static void __lpc_params_setup(struct netdata_local *pldat)
@@ -1494,7 +1493,7 @@ err_out_free_irq:
err_out_iounmap:
iounmap(pldat->net_base);
err_out_disable_clocks:
- clk_disable(pldat->clk);
+ clk_disable_unprepare(pldat->clk);
clk_put(pldat->clk);
err_out_free_dev:
free_netdev(ndev);
@@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
iounmap(pldat->net_base);
mdiobus_unregister(pldat->mii_bus);
mdiobus_free(pldat->mii_bus);
- clk_disable(pldat->clk);
+ clk_disable_unprepare(pldat->clk);
clk_put(pldat->clk);
free_netdev(ndev);
@@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev,
if (netif_running(ndev)) {
netif_device_detach(ndev);
__lpc_eth_shutdown(pldat);
- clk_disable(pldat->clk);
+ clk_disable_unprepare(pldat->clk);
/*
* Reset again now clock is disable to be sure
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 7bf9c028d8d7..c177c7cec13b 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1344,10 +1344,6 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
- info->n_stats = 0;
- info->testinfo_len = 0;
- info->regdump_len = 0;
- info->eedump_len = 0;
}
static int octeon_mgmt_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f6fcf7450352..b19be7c6c1f4 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -164,7 +164,6 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
}
/**
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index f1f0108c275d..30a6f246dfc9 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -91,4 +91,15 @@ config NETXEN_NIC
---help---
This enables the support for NetXen's Gigabit Ethernet card.
+config QED
+ tristate "QLogic QED 25/40/100Gb core driver"
+ depends on PCI
+ ---help---
+ This enables the support for ...
+
+config QEDE
+ tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+ depends on QED
+ ---help---
+ This enables the support for ...
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index b2a283d9ae60..cee90e05beb8 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE) += qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 87e073c6e291..f9034467736c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -93,8 +93,6 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
- drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
}
static int
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644
index 000000000000..5c2fd57236fe
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644
index 000000000000..ac17d8669b1a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -0,0 +1,496 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+#define DRV_MODULE_VERSION "8.4.0.0"
+
+#define MAX_HWFNS_PER_DEVICE (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+/* CAU states */
+enum qed_coalescing_mode {
+ QED_COAL_MODE_DISABLE,
+ QED_COAL_MODE_ENABLE
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+
+/* helpers */
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
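+
+/* Editorial example: with an assumed cache_shift of 6 (64-byte cache lines),
+ * ALIGNED_TYPE_SIZE() rounds a 52-byte type up to (52 + 63) & ~63 == 64.
+ */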
+
+#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ (val == (cond1) ? true1 : \
+ (val == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_mcp_info;
+
+struct qed_rt_data {
+ u32 init_val;
+ bool b_valid;
+};
+
+/* The PCI personality is not quite synonymous with protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may also support the RoCE protocol
+ */
+enum qed_pci_personality {
+ QED_PCI_ETH,
+ QED_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+enum QED_RESOURCES {
+ QED_SB,
+ QED_L2_QUEUE,
+ QED_VPORT,
+ QED_RSS_ENG,
+ QED_PQ,
+ QED_RL,
+ QED_MAC,
+ QED_VLAN,
+ QED_ILT,
+ QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+ QED_PF_L2_QUE,
+ QED_MAX_FEATURES,
+};
+
+enum QED_PORT_MODE {
+ QED_PORT_MODE_DE_2X40G,
+ QED_PORT_MODE_DE_2X50G,
+ QED_PORT_MODE_DE_1X100G,
+ QED_PORT_MODE_DE_4X10G_F,
+ QED_PORT_MODE_DE_4X10G_E,
+ QED_PORT_MODE_DE_4X20G,
+ QED_PORT_MODE_DE_1X40G,
+ QED_PORT_MODE_DE_2X25G,
+ QED_PORT_MODE_DE_1X25G
+};
+
+struct qed_hw_info {
+ /* PCI personality */
+ enum qed_pci_personality personality;
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[QED_MAX_RESC];
+ u32 resc_num[QED_MAX_RESC];
+ u32 feat_num[QED_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
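+
+/* RESC_START()/RESC_NUM() yield the first index and the count of a resource
+ * allocated to this PF; e.g. the context manager below starts its ILT
+ * mapping at RESC_START(p_hwfn, QED_ILT).
+ */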
+
+ u8 num_tc;
+ u8 offload_tc;
+ u8 non_offload_tc;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ u32 vendor_id;
+ u32 device_id;
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+
+ struct qed_igu_info *p_igu_info;
+
+ u32 port_mode;
+ u32 hw_mode;
+};
+
+struct qed_hw_cid_data {
+ u32 cid;
+ bool b_cid_allocated;
+
+ /* Additional identifiers */
+ u16 opaque_fid;
+ u8 vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct qed_dmae_info {
+ /* Mutex for synchronizing access to functions */
+ struct mutex mutex;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u8 pure_lb_pq;
+ u8 offload_pq;
+ u8 pure_ack_pq;
+ u8 vf_queues_offset;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct qed_storm_stats {
+ struct storm_stats mstats;
+ struct storm_stats pstats;
+ struct storm_stats tstats;
+ struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+ struct fw_ver_info *fw_ver_info;
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ u32 init_ops_size;
+};
+
+struct qed_simd_fp_handler {
+ void *token;
+ void (*func)(void *);
+};
+
+struct qed_hwfn {
+ struct qed_dev *cdev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine */
+ u8 abs_pf_id;
+#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ bool first_on_engine;
+ bool hw_init_done;
+
+ /* BAR access */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct qed_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct qed_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct qed_rt_data *rt_data;
+
+ /* SPQ */
+ struct qed_spq *p_spq;
+
+ /* EQ */
+ struct qed_eq *p_eq;
+
+ /* Consolidate Q */
+ struct qed_consq *p_consq;
+
+ /* Slow-Path definitions */
+ struct tasklet_struct *sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct qed_ptt *p_main_ptt;
+ struct qed_ptt *p_dpc_ptt;
+
+ struct qed_sb_sp_info *p_sp_sb;
+ struct qed_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ struct qed_pf_params pf_params;
+
+ /* Array of sb_info of all status blocks */
+ struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
+
+ struct qed_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not */
+ bool b_int_enabled;
+
+ struct qed_mcp_info *mcp_info;
+
+ struct qed_hw_cid_data *p_tx_cids;
+ struct qed_hw_cid_data *p_rx_cids;
+
+ struct qed_dmae_info dmae_info;
+
+ /* QM init */
+ struct qed_qm_info qm_info;
+ struct qed_storm_stats storm_stats;
+
+ /* Buffer for unzipping firmware data */
+ void *unzip_buf;
+
+ struct qed_simd_fp_handler simd_proto_handler[64];
+
+ struct z_stream_s *stream;
+};
+
+struct pci_params {
+ int pm_cap;
+
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned int irq;
+ u8 pf_num;
+};
+
+struct qed_int_param {
+ u32 int_mode;
+ u8 num_vectors;
+ u8 min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+ struct qed_int_param in;
+ struct qed_int_param out;
+ struct msix_entry *msix_table;
+ bool fp_initialized;
+ u8 fp_msix_base;
+ u8 fp_msix_cnt;
+};
+
+struct qed_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ u8 type;
+#define QED_DEV_TYPE_BB_A0 (0 << 0)
+#define QED_DEV_TYPE_MASK (0x3)
+#define QED_DEV_TYPE_SHIFT (0)
+
+ u16 chip_num;
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 16
+
+ u16 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 12
+
+ u16 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xf
+#define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports_in_engines;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+ enum mf_mode mf_mode;
+#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+ u8 ver_str[VER_SIZE];
+
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ u8 wol;
+
+ u32 int_mode;
+ enum qed_coalescing_mode int_coalescing_mode;
+ u8 rx_coalesce_usecs;
+ u8 tx_coalesce_usecs;
+
+ /* Start Bar offset of first hwfn */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const struct iro *iro_arr;
+#define IRO (p_hwfn->cdev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+ u32 drv_type;
+
+ struct qed_eth_stats *reset_stats;
+ struct qed_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Linux specific here */
+ struct qede_dev *edev;
+ struct pci_dev *pdev;
+ int msg_enable;
+
+ struct pci_params pci_params;
+
+ struct qed_int_params int_params;
+
+ u8 protocol;
+#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
+
+ /* Callbacks to protocol driver */
+ union {
+ struct qed_common_cb_ops *common;
+ struct qed_eth_cb_ops *eth;
+ } protocol_ops;
+ void *ops_cookie;
+
+ const struct firmware *firmware;
+};
+
+#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \
+ QED_DEV_TYPE_SHIFT)
+#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
+#define QED_IS_BB(dev) (QED_IS_BB_A0(dev))
+
+#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
+#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+
+/**
+ * @brief qed_concrete_to_sw_fid - get the sw function id from
+ * the concrete value.
+ *
+ * @param concrete_fid
+ *
+ * @return inline u8
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+ u32 concrete_fid)
+{
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+
+ return pfid;
+}
+
+#define PURE_LB_TC 8
+
+#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
+ (cdev->regview) + \
+ (offset))
+
+#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val) \
+ writel((u32)val, (void __iomem *)((u8 __iomem *)\
+ (cdev->doorbells) + (db_addr)))
+
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+ u32 input_len, u8 *input_buf,
+ u32 max_size, u8 *unzip_buf);
+
+#define QED_ETH_INTERFACE_VERSION 300
+
+#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644
index 000000000000..7ccdb46c6764
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -0,0 +1,847 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE 3
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
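+
+/* Editorial sketch of composing an ILT entry: a 64-bit word carrying the
+ * 4K-aligned physical address in bits 0-51 and a valid flag in bit 52:
+ *
+ *	u64 entry = 0;
+ *
+ *	SET_FIELD(entry, ILT_ENTRY_PHY_ADDR, phys >> 12);
+ *	SET_FIELD(entry, ILT_ENTRY_VALID, 1ULL);
+ *
+ * This matches how qed_ilt_init_pf() programs the shadow lines below.
+ */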
+
+/* connection context union */
+union conn_context {
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+};
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per-protocol configuration object */
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cid_start;
+};
+
+/* ILT Client configuration, Per connection type (protocol) resources. */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_QM,
+ ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+};
+
+/* Per Path -
+ * ILT shadow table
+ * Protocol acquired CID lists
+ * PF start line in ILT
+ */
+struct qed_dma_mem {
+ dma_addr_t p_phys;
+ void *p_virt;
+ size_t size;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_cxt_mngr {
+ /* Per-protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ /* ILT shadow table */
+ struct qed_dma_mem *ilt_shadow;
+ u32 pf_start_line;
+};
+
+static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+{
+ u32 type, pf_cids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ pf_cids += p_mngr->conn_cfg[type].cid_count;
+
+ return pf_cids;
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_qm_iids *iids)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++)
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 start_line, u32 total_size,
+ u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify that it's called only once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size,
+ p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val,
+ p_cli->last.val, p_blk->total_size,
+ p_blk->real_size_in_page, p_blk->start_line);
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 curr_line, total, pf_cids;
+ struct qed_qm_iids qm_iids;
+
+ memset(&qm_iids, 0, sizeof(qm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ curr_line = p_mngr->pf_start_line;
+ p_cli->pf_total_lines = 0;
+
+ /* get the counters for the CDUC and QM clients */
+ pf_cids = qed_cxt_cdu_iids(p_mngr);
+
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+ total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* QM */
+ p_cli = &p_mngr->clients[ILT_CLI_QM];
+ p_blk = &p_cli->pf_blks[0];
+
+ qed_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
+ p_hwfn->qm_info.num_pqs, 0);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, QED_ILT)) {
+ DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+ curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < ILT_CLI_MAX; pos++)
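+
+/* Note: the iterator above walks every client index regardless of the
+ * "clients" argument; each caller filters on the per-client active flag.
+ */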
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
+ }
+
+ return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_dma->size, p_dma->p_virt,
+ p_dma->p_phys);
+ p_dma->p_virt = NULL;
+ }
+ kfree(p_mngr->ilt_shadow);
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client,
+ u32 start_line_offset)
+{
+ struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left;
+
+ if (!p_blk->total_size)
+ return 0;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = min_t(u32, sz_left,
+ p_blk->real_size_in_page);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_phys,
+ GFP_KERNEL);
+ if (!p_virt)
+ return -ENOMEM;
+ memset(p_virt, 0, size);
+
+ ilt_shadow[line].p_phys = p_phys;
+ ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+ line, (u64)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return 0;
+}
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *clients = p_mngr->clients;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 size, i, j;
+ int rc;
+
+ size = qed_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+ GFP_KERNEL);
+ if (!p_mngr->ilt_shadow) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
+ rc = -ENOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct qed_dma_mem)));
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
+
+ return 0;
+
+ilt_shadow_fail:
+ qed_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ kfree(p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+ }
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 size;
+
+ if (cid_cnt == 0)
+ continue;
+
+ size = DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long);
+ p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
+ if (!p_mngr->acquired[type].cid_map)
+ goto cid_map_fail;
+
+ p_mngr->acquired[type].max_count = cid_cnt;
+ p_mngr->acquired[type].start_cid = start_cid;
+
+ p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_mngr->acquired[type].start_cid,
+ p_mngr->acquired[type].max_count);
+ start_cid += cid_cnt;
+ }
+
+ return 0;
+
+cid_map_fail:
+ qed_cid_map_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+ if (!p_mngr) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize ILT client registers */
+ p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ /* default ILT page size for all clients is 32K */
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ /* Set the cxt manager pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate the ILT shadow table */
+ rc = qed_ilt_shadow_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = qed_cid_map_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+ goto tables_alloc_fail;
+ }
+
+ return 0;
+
+tables_alloc_fail:
+ qed_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ qed_cid_map_free(p_hwfn);
+ qed_ilt_shadow_free(p_hwfn);
+ kfree(p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+
+ if (cid_cnt == 0)
+ continue;
+
+ memset(p_mngr->acquired[type].cid_map, 0,
+ DIV_ROUND_UP(cid_cnt,
+ sizeof(unsigned long) * BITS_PER_BYTE) *
+ sizeof(unsigned long));
+ }
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+}
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_iids iids;
+
+ memset(&iids, 0, sizeof(iids));
+ qed_cxt_qm_iids(p_hwfn, &iids);
+
+ memset(&params, 0, sizeof(params));
+ params.port_id = p_hwfn->port_id;
+ params.pf_id = p_hwfn->rel_pf_id;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.is_first_pf = p_hwfn->first_on_engine;
+ params.num_pf_cids = iids.cids;
+ params.start_pq = qm_info->start_pq;
+ params.num_pf_pqs = qm_info->num_pqs;
+ params.start_vport = qm_info->num_vports;
+ params.pf_wfq = qm_info->pf_wfq;
+ params.pf_rl = qm_info->pf_rl;
+ params.pq_params = qm_info->qm_pq_params;
+ params.vport_params = qm_info->qm_vport_params;
+
+ qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+}
+
+/* CM PF */
+static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ union qed_qm_pq_params pq_params;
+ u16 pq;
+
+ /* XCM pure-LB queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+ return 0;
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0;
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ /* 5 - PF */
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+}
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for_each_ilt_valid_client(i, ilt_clients) {
+ if (!ilt_clients[i].active)
+ continue;
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg,
+ ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients;
+ struct qed_cxt_mngr *p_mngr;
+ struct qed_dma_mem *p_shdw;
+ u32 line, rt_offst, i;
+
+ qed_ilt_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for_each_ilt_valid_client(i, clients) {
+ if (!clients[i].active)
+ continue;
+
+ /* Client's first val and the RT array are absolute; the ILT
+  * shadow's lines are relative.
+  */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /* p_virt may be NULL in case of dynamic allocation */
+ if (p_shdw[line].p_virt) {
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].p_phys >> 12));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+ rt_offst, line, i,
+ (u64)(p_shdw[line].p_phys >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+ qed_cdu_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+{
+ qed_qm_init_pf(p_hwfn);
+ qed_cm_init_pf(p_hwfn);
+ qed_dq_init_pf(p_hwfn);
+ qed_ilt_init_pf(p_hwfn);
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
+ p_mngr->acquired[type].max_count);
+
+ if (rel_cid >= p_mngr->acquired[type].max_count) {
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
+ type);
+ return -EINVAL;
+ }
+
+ __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+
+ *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+ return 0;
+}
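+
+/* Illustrative usage sketch (editorial, not part of this patch): callers
+ * are expected to pair qed_cxt_acquire_cid() with qed_cxt_release_cid(),
+ * e.g.:
+ *
+ *	u32 cid;
+ *
+ *	if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid))
+ *		return -EINVAL;
+ *	... use the connection ...
+ *	qed_cxt_release_cid(p_hwfn, cid);
+ */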
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+ u32 cid,
+ enum protocol_type *p_type)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ enum protocol_type p;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (p = 0; p < MAX_CONN_TYPES; p++) {
+ p_map = &p_mngr->acquired[p];
+
+ if (!p_map->cid_map)
+ continue;
+ if (cid >= p_map->start_cid &&
+ cid < p_map->start_cid + p_map->max_count)
+ break;
+ }
+ *p_type = p;
+
+ if (p == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
+ return false;
+ }
+
+ rel_cid = cid - p_map->start_cid;
+ if (!test_bit(rel_cid, p_map->cid_map)) {
+ DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
+ return false;
+ }
+ return true;
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_mngr->acquired[type].start_cid;
+ __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+ if (!b_acquired)
+ return -EINVAL;
+
+ /* set the protocol type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].p_virt)
+ return -EINVAL;
+
+ p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+ (p_info->iid % cxts_per_p) * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+ return 0;
+}
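+
+/* Illustrative usage sketch (editorial, not part of this patch): the caller
+ * fills in the iid and reads back the context pointer and protocol type:
+ *
+ *	struct qed_cxt_info cxt_info;
+ *
+ *	cxt_info.iid = cid;
+ *	if (qed_cxt_get_cid_info(p_hwfn, &cxt_info))
+ *		return -EINVAL;
+ *	(cxt_info.p_cxt now points at the connection context for cid)
+ */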
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644
index 000000000000..c8e1f5e5c42b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+/**
+ * @brief qed_cxt_acquire_cid - Acquire a new CID of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
+/**
+ * @brief qed_cxt_get_cid_info - Return the context info for a specific CID
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return int
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info);
+
+enum qed_cxt_elem_type {
+ QED_ELEM_CXT,
+ QED_ELEM_TASK
+};
+
+/**
+ * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_common - Initialize the ILT and DQ, common phase, per path.
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_pf - Initialize the ILT and DQ, PF phase, per path.
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_release_cid - Release a CID
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+ u32 cid);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644
index 000000000000..b9b7b7e6fa53
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -0,0 +1,1797 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/* API common to all protocols */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module, u8 dp_level)
+{
+ u32 i;
+
+ cdev->dp_level = dp_level;
+ cdev->dp_module = dp_module;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ }
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->cdev = cdev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+ mutex_init(&p_hwfn->dmae_info.mutex);
+ }
+
+ /* hwfn 0 is always active */
+ cdev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 */
+ cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ kfree(qm_info->qm_pq_params);
+ qm_info->qm_pq_params = NULL;
+ kfree(qm_info->qm_vport_params);
+ qm_info->qm_vport_params = NULL;
+ kfree(qm_info->qm_port_params);
+ qm_info->qm_port_params = NULL;
+}
+
+void qed_resc_free(struct qed_dev *cdev)
+{
+ int i;
+
+ kfree(cdev->fw_data);
+ cdev->fw_data = NULL;
+
+ kfree(cdev->reset_stats);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ kfree(p_hwfn->p_tx_cids);
+ p_hwfn->p_tx_cids = NULL;
+ kfree(p_hwfn->p_rx_cids);
+ p_hwfn->p_rx_cids = NULL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_free(p_hwfn);
+ qed_qm_info_free(p_hwfn);
+ qed_spq_free(p_hwfn);
+ qed_eq_free(p_hwfn, p_hwfn->p_eq);
+ qed_consq_free(p_hwfn, p_hwfn->p_consq);
+ qed_int_free(p_hwfn);
+ qed_dmae_info_free(p_hwfn);
+ }
+}
+
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_port_params *p_qm_port;
+ u8 num_vports, i, vport_id, num_ports;
+ u16 num_pqs, multi_cos_tcs = 1;
+
+ memset(qm_info, 0, sizeof(*qm_info));
+
+ num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+
+ /* Sanity checking that setup requires legal number of resources */
+ if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
+ DP_ERR(p_hwfn,
+ "Need too many Physical queues - 0x%04x when only %04x are available\n",
+ num_pqs, RESC_NUM(p_hwfn, QED_PQ));
+ return -EINVAL;
+ }
+
+ /* PQs are arranged as follows: first the per-TC PQs, then the
+ * pure-LB queue.
+ */
+ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+ num_pqs, GFP_ATOMIC);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+ num_vports, GFP_ATOMIC);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+ MAX_NUM_PORTS, GFP_ATOMIC);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ /* First init per-TC PQs */
+ for (i = 0; i < multi_cos_tcs; i++) {
+ struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ }
+
+ /* Then init pure-LB PQ */
+ qm_info->pure_lb_pq = i;
+ qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[i].wrr_group = 1;
+ i++;
+
+ qm_info->offload_pq = 0;
+ qm_info->num_pqs = num_pqs;
+ qm_info->num_vports = num_vports;
+
+ /* Initialize qm port parameters */
+ num_ports = p_hwfn->cdev->num_ports_in_engines;
+ for (i = 0; i < num_ports; i++) {
+ p_qm_port = &qm_info->qm_port_params[i];
+ p_qm_port->active = 1;
+ p_qm_port->num_active_phys_tcs = 4;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ }
+
+ qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ qm_info->pf_wfq = 0;
+ qm_info->pf_rl = 0;
+ qm_info->vport_rl_en = 1;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ kfree(qm_info->qm_pq_params);
+ kfree(qm_info->qm_vport_params);
+ kfree(qm_info->qm_port_params);
+
+ return -ENOMEM;
+}
+
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+ struct qed_consq *p_consq;
+ struct qed_eq *p_eq;
+ int i, rc = 0;
+
+ cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+ if (!cdev->fw_data)
+ return -ENOMEM;
+
+ /* Allocate Memory for the Queue->CID mapping */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ int tx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ int rx_size = sizeof(struct qed_hw_cid_data) *
+ RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
+ if (!p_hwfn->p_tx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Tx Cids\n");
+ goto alloc_err;
+ }
+
+ p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
+ if (!p_hwfn->p_rx_cids) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for Rx Cids\n");
+ goto alloc_err;
+ }
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ /* First allocate the context manager structure */
+ rc = qed_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Set the HW cid/tid numbers (in the context manager).
+ * Must be done prior to any further computations.
+ */
+ rc = qed_cxt_set_pf_params(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Prepare and process QM requirements */
+ rc = qed_init_qm_info(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Compute the ILT client partition */
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* CID map / ILT shadow table / T2
+ * The table sizes are determined by the computations above.
+ */
+ rc = qed_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because initializes SPQ context */
+ rc = qed_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ p_eq = qed_eq_alloc(p_hwfn, 256);
+
+ if (!p_eq)
+ goto alloc_err;
+ p_hwfn->p_eq = p_eq;
+
+ p_consq = qed_consq_alloc(p_hwfn);
+ if (!p_consq)
+ goto alloc_err;
+ p_hwfn->p_consq = p_consq;
+
+ /* DMA info initialization */
+ rc = qed_dmae_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dmae_info structure\n");
+ goto alloc_err;
+ }
+ }
+
+ cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+ if (!cdev->reset_stats) {
+ DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
+ goto alloc_err;
+ }
+
+ return 0;
+
+alloc_err:
+ qed_resc_free(cdev);
+ return rc;
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_setup(p_hwfn);
+ qed_spq_setup(p_hwfn);
+ qed_eq_setup(p_hwfn, p_hwfn->p_eq);
+ qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+ }
+}
+
+#define FINAL_CLEANUP_CMD_OFFSET (0)
+#define FINAL_CLEANUP_CMD (0x1)
+#define FINAL_CLEANUP_VALID_OFFSET (6)
+#define FINAL_CLEANUP_VFPF_ID_SHIFT (7)
+#define FINAL_CLEANUP_COMP (0x2)
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ int rc = -EBUSY;
+
+ addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+
+ command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
+ command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
+ command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
+ command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+ /* Make sure notification is not set before initiating final cleanup */
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(p_hwfn,
+   "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ id, command);
+
+ qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ msleep(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = 0;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
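+
+/* Editorial note: the cleanup command above packs the opcode, the valid
+ * bit, the VF/PF id and the completion type into a single SDM operation,
+ * then polls the USDM ack address until the firmware posts a non-zero
+ * completion or the retry budget expires.
+ */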
+
+static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+ int hw_mode = 1 << MODE_BB_A0;
+
+ switch (p_hwfn->cdev->num_ports_in_engines) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+ return;
+ }
+
+ switch (p_hwfn->cdev->mf_mode) {
+ case SF:
+ hw_mode |= 1 << MODE_SF;
+ break;
+ case MF_OVLAN:
+ hw_mode |= 1 << MODE_MF_SD;
+ break;
+ case MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
+ hw_mode |= 1 << MODE_SF;
+ }
+
+ hw_mode |= 1 << MODE_ASIC;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+}
+
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, sb_id;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ if (!p_block->is_pf)
+ continue;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id,
+ 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
+ sb_entry);
+ }
+ }
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_common_rt_init_params params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc = 0;
+
+ qed_init_cau_rt_data(cdev);
+
+ /* Program GTT windows */
+ qed_gtt_init(p_hwfn);
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = 1;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = 1;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.pf_rl_en = qm_info->pf_rl_en;
+ params.pf_wfq_en = qm_info->pf_wfq_en;
+ params.vport_rl_en = qm_info->vport_rl_en;
+ params.vport_wfq_en = qm_info->vport_wfq_en;
+ params.port_params = qm_info->qm_port_params;
+
+ qed_qm_common_rt_init(p_hwfn, &params);
+
+ qed_cxt_hw_init_common(p_hwfn);
+
+ /* Close gates from NIG to BRB/Storm; by default they are open, but
+ * we close them to prevent NIG from passing data to reset blocks.
+ * Should have been done in the ENGINE phase, but init-tool lacks
+ * proper port-pretend capabilities.
+ */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ qed_port_unpretend(p_hwfn, p_ptt);
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ if (rc != 0)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ /* Disable relaxed ordering in the PCI config space */
+ qed_wr(p_hwfn, p_ptt, 0x20b4,
+ qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+
+ return rc;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode)
+{
+ int rc = 0;
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ return rc;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int hw_mode,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch)
+{
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ int rc = 0;
+
+ if (p_hwfn->mcp_info) {
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+ /* Update the rate limit once we actually have a link */
+ p_hwfn->qm_info.pf_rl = 100;
+ }
+
+ qed_cxt_hw_init_pf(p_hwfn);
+
+ qed_int_igu_init_rt(p_hwfn);
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & (1 << MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn,
+ NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+ }
+
+ /* Protocol configuration */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+ /* Clean up the chip from a previous driver's leftovers, if any exist */
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ if (rc != 0)
+ return rc;
+
+ /* PF Init sequence */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* Pure runtime initializations - directly to the HW */
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+ if (b_hw_start) {
+ /* enable interrupts */
+ qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+ /* send function start command */
+ rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+ }
+ return rc;
+}
+
+static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 enable)
+{
+ u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+ /* Change PF in PXP */
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* wait until value is set - try for 1 second every 50us */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ usleep_range(50, 60);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+}
+
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data)
+{
+ struct qed_storm_stats *p_stat;
+ u32 load_code, param, *p_address;
+ int rc, mfw_rc, i;
+ u8 fw_vport = 0;
+
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
+ if (rc != 0)
+ return rc;
+
+ /* Enable DMAE in PXP */
+ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+
+ qed_calc_hw_mode(p_hwfn);
+
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_code);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ return rc;
+ }
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
+ rc, load_code);
+
+ p_hwfn->first_on_engine = (load_code ==
+ FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+ /* Fall through */
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+
+ /* Fall through */
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode,
+ b_hw_start, int_mode,
+ allow_npar_tx_switch);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "init phase failed for loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+
+ /* ACK mfw regardless of success or failure of initialization */
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_LOAD_DONE,
+ 0, &load_code, &param);
+ if (rc)
+ return rc;
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
+ return mfw_rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+
+ /* init PF stats */
+ p_stat = &p_hwfn->storm_stats;
+ p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+
+ p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+
+ p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(fw_vport);
+ p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+
+ p_address = &p_stat->tstats.address;
+ *p_address = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+ }
+
+ return 0;
+}
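+
+/* Illustrative bring-up order (editorial sketch, not part of this patch;
+ * QED_INT_MODE_MSIX is assumed from qed_int.h): the functions in this
+ * file are expected to be invoked roughly as
+ *
+ *	qed_hw_prepare(cdev, personality);
+ *	qed_resc_alloc(cdev);
+ *	qed_resc_setup(cdev);
+ *	qed_hw_init(cdev, true, QED_INT_MODE_MSIX, true, bin_fw_data);
+ *
+ * with qed_hw_stop(), qed_resc_free() and qed_hw_remove() on the
+ * teardown path.
+ */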
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+int qed_hw_stop(struct qed_dev *cdev)
+{
+ int rc = 0, t_rc;
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ rc = qed_sp_pf_stop(p_hwfn);
+ if (rc)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ /* Disable Attention Generation */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt,
+ false);
+ if (t_rc != 0)
+ rc = t_rc;
+
+ return rc;
+}
+
+void qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFDOWN,
+ "Shutting down the fastpath\n");
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+ if (i == QED_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ }
+}
+
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static int qed_reg_assert(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt, u32 reg,
+ bool expected)
+{
+ u32 assert_val = qed_rd(hwfn, ptt, reg);
+
+ if (assert_val != expected) {
+ DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+ reg, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qed_hw_reset(struct qed_dev *cdev)
+{
+ int rc = 0;
+ u32 unload_resp, unload_param;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+
+ /* Check for incorrect states */
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ TCFC_REG_STRONG_ENABLE_PF, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ CCFC_REG_STRONG_ENABLE_PF, 0);
+
+ /* Send unload command to MCP */
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_REQ,
+ DRV_MB_PARAM_UNLOAD_WOL_MCP,
+ &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
+ unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_DONE,
+ 0, &unload_resp, &unload_param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+ qed_ptt_pool_free(p_hwfn);
+ kfree(p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
+ if (rc)
+ return rc;
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* clear indirect access */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+ /* Clean Previous errors if such exist */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+
+ /* enable internal target-read */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ return 0;
+}
+
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ int num_features = 1;
+
+ feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
+ num_features,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE));
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+ feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
+ num_features);
+}
+
+static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ int num_funcs, i;
+
+ num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
+ : p_hwfn->cdev->num_ports_in_engines;
+
+ resc_num[QED_SB] = min_t(u32,
+ (MAX_SB_PER_PATH_BB / num_funcs),
+ qed_int_get_num_sbs(p_hwfn, NULL));
+ resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
+ resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
+ resc_num[QED_RL] = 8;
+ resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+ resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
+ num_funcs;
+ resc_num[QED_ILT] = 950;
+
+ for (i = 0; i < QED_MAX_RESC; i++)
+ resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+ qed_hw_set_feat(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "The numbers for each resource are:\n"
+ "SB = %d start = %d\n"
+ "L2_QUEUE = %d start = %d\n"
+ "VPORT = %d start = %d\n"
+ "PQ = %d start = %d\n"
+ "RL = %d start = %d\n"
+ "MAC = %d start = %d\n"
+ "VLAN = %d start = %d\n"
+ "ILT = %d start = %d\n",
+ p_hwfn->hw_info.resc_num[QED_SB],
+ p_hwfn->hw_info.resc_start[QED_SB],
+ p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
+ p_hwfn->hw_info.resc_num[QED_VPORT],
+ p_hwfn->hw_info.resc_start[QED_VPORT],
+ p_hwfn->hw_info.resc_num[QED_PQ],
+ p_hwfn->hw_info.resc_start[QED_PQ],
+ p_hwfn->hw_info.resc_num[QED_RL],
+ p_hwfn->hw_info.resc_start[QED_RL],
+ p_hwfn->hw_info.resc_num[QED_MAC],
+ p_hwfn->hw_info.resc_start[QED_MAC],
+ p_hwfn->hw_info.resc_num[QED_VLAN],
+ p_hwfn->hw_info.resc_start[QED_VLAN],
+ p_hwfn->hw_info.resc_num[QED_ILT],
+ p_hwfn->hw_info.resc_start[QED_ILT]);
+}
+
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+ struct qed_mcp_link_params *link;
+
+ /* Read global nvm_cfg address */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read nvm_cfg1 (notice this is just an offset, not the offsize (TBD)) */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ /* Read Vendor Id / Device Id */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, pci_id);
+ p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
+ NVM_CFG1_GLOB_VENDOR_ID_MASK;
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, core_cfg);
+
+ core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
+ core_cfg);
+ break;
+ }
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
+ offsetof(struct nvm_cfg1_func, device_id);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ if (IS_MF(p_hwfn)) {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
+ } else {
+ p_hwfn->hw_info.device_id =
+ (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
+ NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, speed_cap_mask));
+ link->speed.advertised_speeds =
+ link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities =
+ link->speed.advertised_speeds;
+
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
+ link_temp);
+ }
+
+ link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+ link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link->pause.autoneg = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->cdev->mf_mode = MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->cdev->mf_mode = MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
+ p_hwfn->cdev->mf_mode = SF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+ p_hwfn->cdev->mf_mode);
+
+ return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ u32 port_mode;
+ int rc;
+
+ /* Read the port mode */
+ port_mode = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NW_PORT_MODE_BB_B0);
+
+ if (port_mode < 3) {
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ } else if (port_mode <= 5) {
+ p_hwfn->cdev->num_ports_in_engines = 2;
+ } else {
+ DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engines);
+
+ /* Default num_ports_in_engines to something */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+
+ qed_hw_get_nvm_info(p_hwfn, p_ptt);
+
+ rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (qed_mcp_is_init(p_hwfn))
+ ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac);
+ else
+ eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ enum qed_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
+
+ qed_hw_get_resc(p_hwfn);
+
+ return rc;
+}
+
+static void qed_get_dev_info(struct qed_dev *cdev)
+{
+ u32 tmp;
+
+ cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_REV);
+ MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+ /* Learn number of HW-functions */
+ tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+ if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+ DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+ cdev->num_hwfns = 2;
+ } else {
+ cdev->num_hwfns = 1;
+ }
+
+ cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_TEST_REG) >> 4;
+ MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+ cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ MISCS_REG_CHIP_METAL);
+ MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+ DP_INFO(cdev->hwfns,
+ "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ cdev->chip_num, cdev->chip_rev,
+ cdev->chip_bond_id, cdev->chip_metal);
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+ void __iomem *p_regview,
+ void __iomem *p_doorbells,
+ enum qed_pci_personality personality)
+{
+ int rc = 0;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
+ return -EINVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ rc = qed_hw_hwfn_prepare(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+ goto err0;
+ }
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id)
+ qed_get_dev_info(p_hwfn->cdev);
+
+ /* Initialize MCP structure */
+ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+ goto err2;
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = qed_init_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+ goto err2;
+ }
+
+ return rc;
+err2:
+ qed_mcp_free(p_hwfn);
+err1:
+ qed_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+static u32 qed_hw_bar_size(struct qed_dev *cdev,
+ u8 bar_id)
+{
+ u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+
+ return size / cdev->num_hwfns;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality)
+{
+ int rc, i;
+
+ /* Store the precompiled init data ptrs */
+ qed_init_iro_array(cdev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+ cdev->doorbells, personality);
+ if (rc)
+ return rc;
+
+ personality = cdev->hwfns[0].hw_info.personality;
+
+ /* Initialize the rest of the hwfns */
+ for (i = 1; i < cdev->num_hwfns; i++) {
+ void __iomem *p_regview, *p_doorbell;
+
+ p_regview = cdev->regview +
+ i * qed_hw_bar_size(cdev, 0);
+ p_doorbell = cdev->doorbells +
+ i * qed_hw_bar_size(cdev, 1);
+ rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+ p_doorbell, personality);
+ if (rc) {
+ /* Cleanup previously initialized hwfns */
+ while (--i >= 0) {
+ qed_init_free(&cdev->hwfns[i]);
+ qed_mcp_free(&cdev->hwfns[i]);
+ qed_hw_hwfn_free(&cdev->hwfns[i]);
+ }
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_init_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ }
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain)
+{
+ dma_addr_t p_pbl_phys = 0;
+ void *p_pbl_virt = NULL;
+ dma_addr_t p_phys = 0;
+ void *p_virt = NULL;
+ u16 page_cnt = 0;
+ size_t size;
+
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ size = page_cnt * QED_CHAIN_PAGE_SIZE;
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain mem\n");
+ goto nomem;
+ }
+
+ if (mode == QED_CHAIN_MODE_PBL) {
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys,
+ GFP_KERNEL);
+ if (!p_pbl_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
+ goto nomem;
+ }
+
+ qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use,
+ p_pbl_phys, p_pbl_virt);
+ } else {
+ qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
+ (u8)elem_size, intended_use, mode);
+ }
+
+ return 0;
+
+nomem:
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PAGE_SIZE,
+ p_virt, p_phys);
+ dma_free_coherent(&cdev->pdev->dev,
+ page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
+ p_pbl_virt, p_pbl_phys);
+
+ return -ENOMEM;
+}
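+
+/* Illustrative usage sketch (editorial, not part of this patch):
+ * allocating and freeing a PBL-mode chain of 256 event-ring elements:
+ *
+ *	struct qed_chain chain;
+ *
+ *	if (qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
+ *			    QED_CHAIN_MODE_PBL, 256,
+ *			    sizeof(union event_ring_element), &chain))
+ *		return -ENOMEM;
+ *	...
+ *	qed_chain_free(cdev, &chain);
+ */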
+
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ size_t size;
+
+ if (!p_chain->p_virt_addr)
+ return;
+
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->pbl.p_virt_table,
+ p_chain->pbl.p_phys_table);
+ }
+
+ size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
+ dma_free_coherent(&cdev->pdev->dev, size,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+}
+
+static void __qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ int i, j;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct tstorm_per_port_stat tstats;
+ struct port_stats port_stats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+ p_hwfn->storm_stats.mstats.address,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+ p_hwfn->storm_stats.ustats.address,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+ p_hwfn->storm_stats.pstats.address,
+ p_hwfn->storm_stats.pstats.len);
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+ p_hwfn->storm_stats.tstats.address,
+ p_hwfn->storm_stats.tstats.len);
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ if (p_hwfn->mcp_info)
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ stats->no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ stats->ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ stats->tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+
+ stats->rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ stats->rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ stats->rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ stats->rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ stats->rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ stats->rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+
+ stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+
+ stats->tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ stats->tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ stats->tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ stats->tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ stats->tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ stats->tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ stats->tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+ stats->rx_64_byte_packets += port_stats.pmm.r64;
+ stats->rx_127_byte_packets += port_stats.pmm.r127;
+ stats->rx_255_byte_packets += port_stats.pmm.r255;
+ stats->rx_511_byte_packets += port_stats.pmm.r511;
+ stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+ stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+ stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+ stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+ stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+ stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+ stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ stats->rx_crc_errors += port_stats.pmm.rfcs;
+ stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ stats->rx_pause_frames += port_stats.pmm.rxpf;
+ stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ stats->rx_align_errors += port_stats.pmm.raln;
+ stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ stats->rx_oversize_packets += port_stats.pmm.rovr;
+ stats->rx_jabbers += port_stats.pmm.rjbr;
+ stats->rx_undersize_packets += port_stats.pmm.rund;
+ stats->rx_fragments += port_stats.pmm.rfrg;
+ stats->tx_64_byte_packets += port_stats.pmm.t64;
+ stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ stats->tx_pause_frames += port_stats.pmm.txpf;
+ stats->tx_pfc_frames += port_stats.pmm.txpp;
+ stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ stats->tx_total_collisions += port_stats.pmm.tncl;
+ stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+
+ for (j = 0; j < 8; j++) {
+ stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+ }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u32 i;
+
+ if (!cdev) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ __qed_get_vport_stats(cdev, stats);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
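+
+/* Illustrative usage sketch (editorial, not part of this patch): a caller
+ * typically establishes a baseline once and then polls adjusted counters:
+ *
+ *	struct qed_eth_stats stats;
+ *
+ *	qed_reset_vport_stats(cdev);
+ *	...
+ *	qed_get_vport_stats(cdev, &stats);
+ */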
+
+/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.mstats.address,
+ &mstats,
+ p_hwfn->storm_stats.mstats.len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.ustats.address,
+ &ustats,
+ p_hwfn->storm_stats.ustats.len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_to(p_hwfn, p_ptt,
+ p_hwfn->storm_stats.pstats.address,
+ &pstats,
+ p_hwfn->storm_stats.pstats.len);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!cdev->reset_stats)
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ else
+ __qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ DP_NOTICE(p_hwfn,
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+ return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_VPORT);
+ max = min + RESC_NUM(p_hwfn, QED_VPORT);
+ DP_NOTICE(p_hwfn,
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+ return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
+ DP_NOTICE(p_hwfn,
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+ return 0;
+}
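+
+/* The three translators above share one pattern: a resource index that is
+ * relative to the hw-function is validated against RESC_NUM() and then
+ * offset by RESC_START() to yield the engine-absolute index the firmware
+ * expects. Illustrative sketch with hypothetical values, assuming
+ * RESC_START(p_hwfn, QED_VPORT) == 8:
+ *
+ *	u8 abs_vport;
+ *
+ *	if (qed_fw_vport(p_hwfn, 0, &abs_vport))
+ *		return -EINVAL;
+ *	...abs_vport now holds 8...
+ */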
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644
index 000000000000..e29a3ba6c8b0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -0,0 +1,283 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free -
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup -
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init -
+ *
+ * @param cdev
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ * Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data);
+
+/**
+ * @brief qed_hw_stop -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ * only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare -
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality);
+
+/**
+ * @brief qed_hw_remove -
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
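+
+/* Typical acquire/release pairing (illustrative sketch; the error code
+ * chosen on failure is up to the caller):
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (!p_ptt)
+ *		return -EBUSY;
+ *	...access registers through p_ptt...
+ *	qed_ptt_release(p_hwfn, p_ptt);
+ */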
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_GRC
+};
+
+/* Values for the flags field below. If the QED_DMAE_FLAG_RW_REPL_SRC flag
+ * is set, the source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as many
+ * times as required to fill the destination block. This is used mostly
+ * to write a zeroed buffer to a destination address using DMA.
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct qed_dmae_params {
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+};
+
+/**
+ * @brief qed_dmae_host2grc - copy data from a source address in host
+ *        memory to the given GRC address, using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags);
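+
+/* Illustrative sketch of the zero-fill use case described for
+ * QED_DMAE_FLAG_RW_REPL_SRC above (the buffer and its DMA address are
+ * hypothetical, assumed to be allocated and zeroed by the caller):
+ *
+ *	qed_dmae_host2grc(p_hwfn, p_ptt, zero_buf_phys, grc_addr,
+ *			  size_in_dwords, QED_DMAE_FLAG_RW_REPL_SRC);
+ */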
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param cdev
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ u16 num_elems,
+ size_t elem_size,
+ struct qed_chain *p_chain);
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param cdev
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev,
+ struct qed_chain *p_chain);
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644
index 000000000000..b2f8e854dfd1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -0,0 +1,5291 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+/********************************/
+/* Add include to common target */
+/********************************/
+
+/* opcodes for the event ring */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_RESERVED,
+ COMMON_EVENT_RESERVED2,
+ COMMON_EVENT_RESERVED3,
+ COMMON_EVENT_RESERVED4,
+ COMMON_EVENT_RESERVED5,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+ COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_RESERVED,
+ COMMON_RAMROD_RESERVED2,
+ COMMON_RAMROD_RESERVED3,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+ __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons /* SPQ Ring Consumer */;
+ __le16 consolid_cons /* Consolidation Ring Consumer */;
+ __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 core_state /* state */;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2 /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 consolid_prod /* physical_q1 */;
+ __le16 reserved16 /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_or_spq_prod /* word4 */;
+ __le16 word5 /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+struct eth_mstorm_per_queue_stat {
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+ struct regpair addr /* Next Page Address */;
+ __le32 reserved[2] /* Reserved */;
+};
+
+union event_ring_element {
+ struct event_ring_entry entry /* Event Ring Entry */;
+ struct event_ring_next_addr next_addr;
+};
+
+enum personality_type {
+ PERSONALITY_RESERVED,
+ PERSONALITY_RESERVED2,
+	PERSONALITY_RDMA_AND_ETH /* RoCE or iWARP */,
+ PERSONALITY_RESERVED3,
+ PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_RESERVED4,
+ MAX_PERSONALITY_TYPE
+};
+
+struct pf_start_tunnel_config {
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode /* Multi function mode */;
+ u8 integ_phase /* Integration phase */;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+ u8 pri_map_valid;
+ u32 outer_tag;
+ u8 reserved0[4];
+};
+
+enum ports_mode {
+ ENGX2_PORTX1 /* 2 engines x 1 port */,
+ ENGX2_PORTX2 /* 2 engines x 2 ports */,
+ ENGX1_PORTX1 /* 1 engine x 1 port */,
+ ENGX1_PORTX2 /* 1 engine x 2 ports */,
+ ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ MAX_PORTS_MODE
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+ __le32 cid /* Slowpath Connection CID */;
+ u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+ u8 protocol_id /* Ramrod Protocol ID */;
+ __le16 echo /* Ramrod echo */;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+ struct ramrod_header hdr /* Ramrod Header */;
+ struct regpair data_ptr;
+};
+
+struct tstorm_per_port_stat {
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair fcoe_irregular_pkt;
+ struct regpair roce_irregular_pkt;
+ struct regpair eth_irregular_pkt;
+ struct regpair toe_irregular_pkt;
+ struct regpair preroce_irregular_pkt;
+};
+
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index /* status block running index */;
+ __le32 reserved1;
+};
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x618000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_PTU,
+ BLOCK_DMAE,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length /* Length in DW */;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo /* PCIe completion address low or grc address */;
+ __le32 comp_addr_hi;
+	__le32 comp_val /* Value to write to completion address */;
+	__le32 crc32 /* crc32 result */;
+ __le32 crc_32_c /* crc32_c result */;
+ __le16 crc16 /* crc16 result */;
+ __le16 crc16_c /* crc16_c result */;
+ __le16 crc10 /* crc_t10 result */;
+ __le16 reserved;
+ __le16 xsum16 /* checksum16 result */;
+ __le16 xsum8 /* checksum8 result */;
+};
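+
+/* The _MASK/_SHIFT pairs above (and throughout this file) are intended to
+ * be driven through the SET_FIELD()/GET_FIELD() helpers used elsewhere in
+ * the driver (assumed available from the qed headers) rather than
+ * open-coded shifts. Illustrative sketch, building the opcode in CPU
+ * order before committing it to the little-endian field:
+ *
+ *	u32 opcode = 0;
+ *
+ *	SET_FIELD(opcode, DMAE_CMD_DST, 1);
+ *	SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
+ *	cmd->opcode = cpu_to_le32(opcode);
+ */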
+
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+enum init_modes {
+ MODE_BB_A0,
+ MODE_RESERVED,
+ MODE_RESERVED2,
+ MODE_ASIC,
+ MODE_RESERVED3,
+ MODE_RESERVED4,
+ MODE_RESERVED5,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_40G,
+ MODE_100G,
+ MODE_EAGLE_ENG1_WORKAROUND,
+ MAX_INIT_MODES
+};
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_RESERVED,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+ TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ u32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
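+
+/* Since GRC addresses are dword-granular, a 23-bit address covers
+ * 2^23 dwords, i.e. a 32 MiB register space.
+ */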
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* width of the init pattern size field in bits (pattern size is in bytes) */
+#define INIT_PATTERN_SIZE_BITS 4
+#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN 19
+#define PXP_WIN_DWORD_SIZE_BITS 10
+#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
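+
+/* With the values above each window spans 1024 dwords (4 KiB), so the 19
+ * global PXP windows cover 76 KiB of BAR0 space in total.
+ */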
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS 8
+#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS 18
+#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS 6
+#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS 8
+#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM 3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS 3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS 0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT 0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
+
+/* max number of register values following the idle check header */
+#define IDLE_CHK_MAX_DUMP_REGS 2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR 0
+#define IDLE_CHK_QM_RD_WR_BANK 1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* The MCP Trace metadata signature is duplicated in the Perl script that
+ * generates the NVRAM images.
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+ u32 offset;
+ u32 length /* buffer length in bytes */;
+};
+
+/* binary buffer types */
+enum bin_buffer_type {
+ BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_IRO /* internal RAM offsets array */,
+ MAX_BIN_BUFFER_TYPE
+};
+
+/* Chip IDs */
+enum chip_ids {
+ CHIP_BB_A0 /* BB A0 chip ID */,
+ CHIP_BB_B0 /* BB B0 chip ID */,
+ CHIP_K2 /* AH chip ID */,
+ MAX_CHIP_IDS
+};
+
+enum idle_chk_severity_types {
+ IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+struct init_array_raw_hdr {
+ __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+struct init_array_standard_hdr {
+ __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+struct init_array_zipped_hdr {
+ __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+struct init_array_pattern_hdr {
+ __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id /* Callback ID */;
+ __le16 block_id /* Blocks ID */;
+};
+
+/* init comparison types */
+enum init_comparison_types {
+	INIT_COMPARISON_EQ /* equal comparison */,
+	INIT_COMPARISON_OR /* bitwise OR comparison */,
+	INIT_COMPARISON_AND /* bitwise AND comparison */,
+ MAX_INIT_COMPARISON_TYPES
+};
+
+/* init operation: delay */
+struct init_delay_op {
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+ __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
+};
+
+/* init operation: if_phase */
+struct init_if_phase_op {
+ __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2 /* Init param 2 */;
+};
+
+/* init array params */
+struct init_op_array_params {
+ __le16 size /* array size in dwords */;
+ __le16 offset /* array start offset in dwords */;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+ __le32 data;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/* init operation: read */
+struct init_read_op {
+ __le32 op_data;
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_COMP_MASK 0x7
+#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 7
+#define INIT_READ_OP_POLL_MASK 0x1
+#define INIT_READ_OP_POLL_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+ __le32 expected_val;
+};
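+
+/* When the POLL bit is set, the read is repeated until the value read back
+ * matches expected_val under the comparison selected by POLL_COMP (an
+ * init_comparison_types value). A minimal sketch of the per-iteration
+ * check, assuming 'val' is the value just read:
+ *
+ *	bool done;
+ *
+ *	switch (comp) {
+ *	case INIT_COMPARISON_EQ:
+ *		done = (val == expected_val);
+ *		break;
+ *	case INIT_COMPARISON_OR:
+ *		done = ((val | expected_val) != 0);
+ *		break;
+ *	case INIT_COMPARISON_AND:
+ *		done = ((val & expected_val) == expected_val);
+ *		break;
+ *	}
+ */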
+
+/* Init operations union */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
+
+/* Init command operation types */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_IF_MODE /* if_mode init command */,
+ INIT_OP_IF_PHASE /* if_phase init command */,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
+
+/* init source types */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
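+
+/* The SOURCE field of struct init_write_op above holds one of these values
+ * and selects which member of union init_write_args is meaningful:
+ * inline_val for INIT_SRC_INLINE, zeros_count for INIT_SRC_ZEROS,
+ * array_offset for INIT_SRC_ARRAY and runtime for INIT_SRC_RUNTIME. A
+ * handler dispatches on it along these lines (illustrative sketch; qed_wr()
+ * is the driver's register-write helper and 'addr' the decoded ADDRESS
+ * field):
+ *
+ *	if (source == INIT_SRC_INLINE)
+ *		qed_wr(p_hwfn, p_ptt, addr,
+ *		       le32_to_cpu(op->args.inline_val));
+ *
+ * with INIT_SRC_ZEROS writing args.zeros_count zero dwords starting at the
+ * same address, and the array/runtime cases filling the range from the
+ * init-values array and the runtime array, respectively.
+ */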
+
+/* Internal RAM Offsets macro data */
+struct iro {
+ u32 base /* RAM field offset */;
+ u16 m1 /* multiplier 1 */;
+ u16 m2 /* multiplier 2 */;
+ u16 m3 /* multiplier 3 */;
+ u16 size /* RAM field size */;
+};
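+
+/* Each IRO entry locates a storm RAM field as a base byte offset plus up
+ * to three per-index strides, i.e. the offset of element (i1, i2, i3) is
+ *
+ *	base + i1 * m1 + i2 * m2 + i3 * m3
+ *
+ * with 'size' giving the width of a single element. The *_OFFSET/*_SIZE
+ * macros further down instantiate this formula; the fields in this file
+ * use at most the first multiplier.
+ */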
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+ u8 num_active_phys_tcs;
+ u16 num_pbf_cmd_lines;
+ u16 num_btb_blocks;
+ __le16 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+ u8 reserved;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ u32 vport_rl;
+ u16 vport_wfq;
+ u16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD \
+ 0x00f000UL
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM \
+ 0x010000UL
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM \
+ 0x011000UL
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
+ 0x012000UL
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM \
+ 0x013000UL
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
+ 0x014000UL
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
+ 0x015000UL
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM \
+ 0x016000UL
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM \
+ 0x017000UL
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM \
+ 0x018000UL
+
+/**
+ * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
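+
+/* Typical use (illustrative): size the PF's QM ILT backing store before
+ * running any of the QM init helpers below, e.g.
+ *
+ *	u32 num_4k_pages = qed_qm_pf_mem_size(pf_id, num_pf_cids,
+ *					      num_vf_cids, num_tids,
+ *					      num_pf_pqs, num_vf_pqs);
+ *
+ * and allocate num_4k_pages * 4096 bytes of host memory accordingly.
+ */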
+
+struct qed_qm_common_rt_init_params {
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
+};
+
+/**
+ * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @param p_hwfn
+ * @param p_params - common runtime init parameters, where:
+ *	max_ports_per_engine - max number of ports per engine in HW
+ *	max_phys_tcs_per_port - max number of physical TCs per port in HW
+ *	pf_rl_en - enable per-PF rate limiters
+ *	pf_wfq_en - enable per-PF WFQ
+ *	vport_rl_en - enable per-VPORT rate limiters
+ *	vport_wfq_en - enable per-VPORT WFQ
+ *	port_params - array of size MAX_NUM_PORTS with parameters
+ *	for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
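+
+/* A caller fills the parameter block and hands it over in a single call
+ * (illustrative sketch; the numeric values are placeholders and
+ * port_params points at an array of MAX_NUM_PORTS entries):
+ *
+ *	struct qed_qm_common_rt_init_params params = {
+ *		.max_ports_per_engine = 2,
+ *		.max_phys_tcs_per_port = 4,
+ *		.pf_rl_en = true,
+ *		.vport_rl_en = true,
+ *		.port_params = port_params,
+ *	};
+ *
+ *	rc = qed_qm_common_rt_init(p_hwfn, &params);
+ */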
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
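+/**
+ * @brief qed_qm_pf_rt_init - Prepare QM runtime init values for the PF phase
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param p_params - PF runtime init parameters, carrying the per-PF CID,
+ * task, PQ and VPORT ranges described by the struct above
+ *
+ * @return 0 on success, -1 on error.
+ */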
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
+/**
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
+
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl);
+
+/**
+ * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
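+
+/* The stop flow is typically two-phased (illustrative): a first call with
+ * is_release_cmd = false pauses the PQs, and a second call with
+ * is_release_cmd = true releases them once they have drained:
+ *
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ *				  start_pq, num_pqs))
+ *		return -EBUSY;
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ *				  start_pq, num_pqs))
+ *		return -EBUSY;
+ */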
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \
+ ((port_id) * \
+ IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \
+ ((vf_id) * \
+ IRO[2].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \
+ ((pf_id) * \
+ IRO[4].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[4].size)
+/* Ustorm Completion ring consumer */
+#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \
+ ((global_queue_id) * \
+ IRO[5].m1))
+#define USTORM_CQ_CONS_SIZE (IRO[5].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \
+ ((core_rx_queue_id) * \
+ IRO[12].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size)
+/* Tstorm LiteL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
+ ((core_rx_q_id) * \
+ IRO[13].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
+ ((core_rx_q_id) * \
+ IRO[14].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
+ ((core_txst_id) * \
+ IRO[15].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
+ ((stat_counter_id) * \
+ IRO[16].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \
+ ((queue_id) * \
+ IRO[17].m1))
+#define MSTORM_PRODS_SIZE (IRO[17].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \
+ ((stat_counter_id) * \
+ IRO[19].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[19].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \
+ ((queue_id) * \
+ IRO[20].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \
+ ((stat_counter_id) * \
+ IRO[21].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \
+ ((pf_id) * \
+ IRO[22].m1))
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \
+ ((queue_id) * \
+ IRO[23].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \
+ ((rss_id) * \
+ IRO[24].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \
+ ((rss_id) * \
+ IRO[25].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \
+ ((pf_id) * \
+ IRO[26].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \
+ ((cmdq_queue_id) * \
+ IRO[27].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \
+ ((rq_queue_id) * \
+ IRO[28].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size)
+/* Pstorm RoCE statistics */
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \
+ ((stat_counter_id) * \
+ IRO[29].m1))
+#define PSTORM_ROCE_STAT_SIZE (IRO[29].size)
+/* Tstorm RoCE statistics */
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
+ ((stat_counter_id) * \
+ IRO[30].m1))
+#define TSTORM_ROCE_STAT_SIZE (IRO[30].size)
+
+static const struct iro iro_arr[31] = {
+ { 0x10, 0x0, 0x0, 0x0, 0x8 },
+ { 0x4448, 0x60, 0x0, 0x0, 0x60 },
+ { 0x498, 0x8, 0x0, 0x0, 0x4 },
+ { 0x494, 0x0, 0x0, 0x0, 0x4 },
+ { 0x10, 0x8, 0x0, 0x0, 0x2 },
+ { 0x90, 0x8, 0x0, 0x0, 0x2 },
+ { 0x4540, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x39e0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x2598, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x4350, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x52d0, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x7a48, 0x0, 0x0, 0x0, 0xf8 },
+ { 0x100, 0x8, 0x0, 0x0, 0x8 },
+ { 0x5808, 0x10, 0x0, 0x0, 0x10 },
+ { 0xb100, 0x30, 0x0, 0x0, 0x30 },
+ { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
+ { 0x54f8, 0x40, 0x0, 0x0, 0x40 },
+ { 0x200, 0x10, 0x0, 0x0, 0x8 },
+ { 0x9e70, 0x0, 0x0, 0x0, 0x4 },
+ { 0x7ca0, 0x40, 0x0, 0x0, 0x30 },
+ { 0xd00, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2790, 0x80, 0x0, 0x0, 0x38 },
+ { 0xa520, 0xf0, 0x0, 0x0, 0xf0 },
+ { 0x80, 0x8, 0x0, 0x0, 0x8 },
+ { 0xac0, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2580, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2500, 0x8, 0x0, 0x0, 0x8 },
+ { 0x440, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1800, 0x8, 0x0, 0x0, 0x2 },
+ { 0x27c8, 0x80, 0x0, 0x0, 0x10 },
+ { 0x4710, 0x10, 0x0, 0x0, 0x10 },
+};
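+
+/* Tying the table back to the macros above: IRO[1] is
+ * { 0x4448, 0x60, 0x0, 0x0, 0x60 }, so TSTORM_PORT_STAT_OFFSET(port_id)
+ * resolves to 0x4448 + (port_id) * 0x60, one 0x60-byte statistics block
+ * per port.
+ */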
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2232
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6663
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6665
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6667
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29834
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29901
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29902
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29964
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30505
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30762
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30764
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30796
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30812
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30846
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31006
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31007
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31520
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 33056
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33568
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431
+
+#define RUNTIME_ARRAY_SIZE 34432
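+
+/* The *_RT_OFFSET values above index a host-side staging array of
+ * RUNTIME_ARRAY_SIZE u32 entries whose contents are later written to the
+ * corresponding registers, so a runtime value is staged simply as
+ * (illustrative sketch; rt_data is a hypothetical array of that size):
+ *
+ *	u32 rt_data[RUNTIME_ARRAY_SIZE];
+ *
+ *	rt_data[DORQ_REG_PF_WAKE_ALL_RT_OFFSET] = 1;
+ */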
+
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 /* cf17 */
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 /* cf17en */
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2] /* padding */;
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2] /* padding */;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2] /* padding */;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+};
+
+enum eth_filter_action {
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+ ETH_FILTER_ACTION_REPLACE,
+ MAX_ETH_FILTER_ACTION
+};
+
+struct eth_filter_cmd {
+ u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+ u8 vport_id /* the vport id */;
+ u8 action /* filter command action: add/remove/replace */;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
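+
+/* The 6-byte MAC address travels as three 16-bit words. A sketch of the
+ * packing, assuming mac[] holds the address in network byte order with
+ * mac_msb carrying the two most significant bytes:
+ *
+ *	cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
+ *	cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
+ *	cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
+ */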
+
+struct eth_filter_cmd_header {
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
+
+enum eth_filter_type {
+ ETH_FILTER_TYPE_MAC,
+ ETH_FILTER_TYPE_VLAN,
+ ETH_FILTER_TYPE_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC,
+ ETH_FILTER_TYPE_INNER_VLAN,
+ ETH_FILTER_TYPE_INNER_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_VNI,
+ MAX_ETH_FILTER_TYPE
+};
+
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+ ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+ ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+ ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+ ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+ ETH_RAMROD_RESERVED,
+ ETH_RAMROD_RESERVED2,
+ ETH_RAMROD_RESERVED3,
+ ETH_RAMROD_RESERVED4,
+ ETH_RAMROD_RESERVED5,
+ ETH_RAMROD_RESERVED6,
+ ETH_RAMROD_RESERVED7,
+ ETH_RAMROD_RESERVED8,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+struct eth_vport_rss_config {
+ __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED,
+ ETH_VPORT_RSS_MODE_REGULAR,
+ MAX_ETH_VPORT_RSS_MODE
+};
+
+struct eth_vport_rx_mode {
+ __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+ __le16 reserved2[3];
+};
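+
+/* Accept/drop decisions are individual bits in 'state'. For example, a
+ * promiscuous-style configuration might be built as (illustrative sketch):
+ *
+ *	u16 state = 0;
+ *
+ *	state |= ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK <<
+ *		 ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT;
+ *	state |= ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK <<
+ *		 ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT;
+ *	rx_mode->state = cpu_to_le16(state);
+ */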
+
+struct eth_vport_tpa_param {
+ u64 reserved[2];
+};
+
+struct eth_vport_tx_mode {
+ __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+ __le16 reserved2[3];
+};
+
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[4];
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair sge_base;
+};
+
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
+};
+
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 init_sge_ring_flg;
+ u8 vport_id;
+ u8 pxp_tph_valid_sge;
+ u8 pxp_st_hint;
+ __le16 pxp_st_index;
+ u8 reserved[6];
+ struct regpair sge_base;
+};
+
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 tc;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ u8 pxp_st_hint;
+ u8 reserved1[3];
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ struct regpair pbl_base_addr;
+};
+
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+struct vport_filter_update_ramrod_data {
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+ u8 default_vlan_en;
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ __le16 default_vlan;
+ u8 untagged;
+ u8 reserved[7];
+};
+
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_sge_param_flg;
+ __le16 sge_buff_size;
+ u8 max_sges_num;
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+ u8 default_vlan_en;
+ u8 update_default_vlan_flg;
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 reserved;
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+struct vport_update_ramrod_data {
+ struct vport_update_ramrod_data_cmn common;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
+};
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+#define VF_MAX_STATIC 192 /* In case of K2 */
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2 /* Global */
+#define MCP_GLOB_PORT_MAX 4 /* Global */
+#define MCP_GLOB_FUNC_MAX 16 /* Global */
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+/* SECTION_OFFSET calculates the offset in bytes out of an offsize value */
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* QED_SECTION_SIZE calculates the size in bytes out of an offsize value */
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section.
+ */
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + \
+ (QED_SECTION_SIZE(_offsize) * idx))
+
+/* SECTION_OFFSIZE_ADDR returns the GRC addr of a section's offsize entry.
+ * Use offsetof, since the OFFSETUP macro collides with the firmware
+ * definition.
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
+ offsetof(struct \
+ mcp_public_data, \
+ sections[_section]))
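+
+/* Illustrative sketch composing the helpers above, assuming the offsize
+ * value has first been read from the GRC (read32() stands in for the
+ * driver's GRC read; PUBLIC_PORT and struct mcp_public_data are defined
+ * further below):
+ *
+ *	offsize = read32(SECTION_OFFSIZE_ADDR(pub_base, PUBLIC_PORT));
+ *	port_addr = SECTION_ADDR(offsize, port_id);
+ *
+ * port_addr is then MCP_REG_SCRATCH + the section's byte offset +
+ * port_id * element size, i.e. the GRC address of that port's data.
+ */
+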
+/* PHY configuration */
+struct pmm_phy_cfg {
+ u32 speed;
+#define PMM_SPEED_AUTONEG 0
+
+ u32 pause; /* bitmask */
+#define PMM_PAUSE_NONE 0x0
+#define PMM_PAUSE_AUTONEG 0x1
+#define PMM_PAUSE_RX 0x2
+#define PMM_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
+ u32 loopback_mode;
+#define PMM_LOOPBACK_NONE 0
+#define PMM_LOOPBACK_INT_PHY 1
+#define PMM_LOOPBACK_EXT_PHY 2
+#define PMM_LOOPBACK_EXT 3
+#define PMM_LOOPBACK_MAC 4
+
+ /* features */
+ u32 feature_config_flags;
+};
+
+struct port_mf_cfg {
+ u32 dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct pmm_stats {
+ u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+ u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+ u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
+ u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+ u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+ u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+ u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+ u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+ u64 tlpiec;
+ u64 tncl;
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+ u64 rxpok;
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct pmm_stats pmm;
+};
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
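+
+/* Sketch of decoding one port's CMT byte with the bits above
+ * (illustrative only):
+ *
+ *	u8 b = cmt->port_cmt[port_id];
+ *	bool in_team = b & PORT_CMT_IN_TEAM;
+ *	bool active = b & PORT_CMT_PORT_ROLE;
+ *	u8 team = (b & PORT_CMT_TEAM_MASK) ? CMT_TEAM1 : CMT_TEAM0;
+ */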
+
+/**************************************
+* LLDP and DCBX HSI structures
+**************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+
+enum lldp_agent_e {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status; /* TBD */
+
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC 4
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+/* Not in use
+ * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ */
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+
+ /* PFC feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/* */
+/* P U B L I C G L O B A L */
+/* */
+/**************************************/
+struct public_global {
+ u32 max_path;
+#define MAX_PATH_BIG_BEAR 2
+#define MAX_PATH_K2 1
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+};
+
+/**************************************/
+/* */
+/* P U B L I C P A T H */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Shared Memory 2 Region *
+****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 128 bit: VF ack */
+/* 8 bit: iov_dis_ack */
+/* In order to maintain endianness in the mailbox hsi, we want to keep */
+/* using u32. The fw must have the VF right after the PF since this is */
+/* how it accesses arrays (it always expects the VF to reside right */
+/* after the PF, which makes the calculation much easier for it). */
+/* In order to satisfy both limitations and keep the struct small, the */
+/* code will abuse the structure defined here to achieve the actual */
+/* partition above. */
+/****************************************************************************/
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE 0
+#define ACCUM_ACK_PF_SHIFT 0
+
+#define ACCUM_ACK_VF_BASE 8
+#define ACCUM_ACK_VF_SHIFT 3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P O R T */
+/* */
+/**************************************/
+
+/****************************************************************************
+* Driver <-> FW Mailbox *
+****************************************************************************/
+
+struct public_port {
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
+
+ /* validity bits */
+#define MCP_VALIDITY_PCI_CFG 0x00100000
+#define MCP_VALIDITY_MB 0x00200000
+#define MCP_VALIDITY_DEV_INFO 0x00400000
+#define MCP_VALIDITY_RESERVED 0x00000007
+
+ /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+
+ /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET 0
+#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+#define LFA_NO_REASON (0 << 0)
+#define LFA_LINK_DOWN BIT(0)
+#define LFA_FORCE_INIT BIT(1)
+#define LFA_LOOPBACK_MISMATCH BIT(2)
+#define LFA_SPEED_MISMATCH BIT(3)
+#define LFA_FLOW_CTRL_MISMATCH BIT(4)
+#define LFA_ADV_SPEED_MISMATCH BIT(5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET 16
+#define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ u32 link_change_count;
+
+ /* LLDP params */
+	struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+	struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+};
+
+/**************************************/
+/* */
+/* P U B L I C F U N C */
+/* */
+/**************************************/
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 reserved[8];
+
+ u32 config;
+
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+ /* MINBW, MAXBW */
+/* value range 0..100, in increments of 1% */
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
+
+ u32 mac_upper; /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation; /* vf per pf */
+
+	u32 preserve_data; /* Will be used by CCM */
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0xff000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+};
+
+/**************************************/
+/* */
+/* P U B L I C M B */
+/* */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Basically, each driver request to set feature parameters
+ * is done using a different command, which is linked
+ * to a specific data structure from the union below.
+ * For a huge structure, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_lower;
+};
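+
+/* Sketch, assuming the usual split (upper 16 MAC bits in the low half of
+ * mac_upper, lower 32 bits in mac_lower); illustrative, not part of the
+ * HSI:
+ */
+static inline u64 mcp_mac_to_u64(const struct mcp_mac *mac)
+{
+	return ((u64)(mac->mac_upper & 0xffff) << 32) | mac->mac_lower;
+}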
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+union drv_union_data {
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+ struct mcp_mac wol_mac;
+
+ struct pmm_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64; /* For PHY / AVS commands */
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ struct drv_version_stc drv_version;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+ /* Params - FORCE - Reinitialize the link regardless of LFA */
+ /* - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+
+#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+
+ /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+ /* UNLOAD_DONE_params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+ /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+ /* LLDP / DCBX params*/
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
+
+/* configure vf MSIX params*/
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+ u32 fw_mb_header;
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_FLR_ACK 0x02000000
+#define FW_MSG_CODE_FLR_NACK 0x02100000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_PHY_OK 0x00110000
+#define FW_MSG_CODE_PHY_ERROR 0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
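+
+/* A minimal sketch of the driver-to-MFW handshake implied by the masks
+ * above: the command goes in the upper 16 bits of drv_mb_header, a
+ * rolling sequence number in the lower 16 bits, and the MFW echoes the
+ * sequence back in fw_mb_header (illustrative; the real driver also
+ * serializes access and handles timeouts):
+ *
+ *	seq = ++drv_seq & DRV_MSG_SEQ_NUMBER_MASK;
+ *	mb->drv_mb_param = param;
+ *	mb->drv_mb_header = (cmd & DRV_MSG_CODE_MASK) | seq;
+ *	poll until (mb->fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) == seq;
+ *	response = mb->fw_mb_header & FW_MSG_CODE_MASK;
+ */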
+
+/* MFW - DRV MB */
+/**********************************************************************
+* Description
+* Incremental Aggregative
+* 8-bit MFW counter per message
+* 8-bit ack-counter per message
+* Capabilities
+* Provides up to 256 aggregative messages per type
+* Provides 4 message types per dword
+* Message type pointers to byte offset
+* Backward compatibility by using sizeof for the counters.
+* No lock required for 32-bit messages
+* Limitations:
+* In case of messages greater than 32 bits, a dedicated mechanism
+* (e.g. a lock) is required to prevent data corruption.
+**********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
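+
+/* Each message owns one byte (an 8-bit counter) inside msg[]/ack[],
+ * located by the macros above; a message is pending while its MFW
+ * counter differs from the driver's ack counter. Illustrative helper,
+ * not part of the HSI:
+ */
+static inline bool mfw_mb_msg_pending(const struct public_mfw_mb *mb,
+				      u32 msg_id)
+{
+	u32 dw = MFW_DRV_MSG_DWORD(msg_id);
+	u32 mask = MFW_DRV_MSG_MASK(msg_id);
+
+	return (mb->msg[dw] & mask) != (mb->ack[dw] & mask);
+}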
+
+/**************************************/
+/* */
+/* P U B L I C D A T A */
+/* */
+/**************************************/
+enum public_sections {
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+struct mcp_public_data {
+	/* The sections field is an array */
+ u32 num_sections;
+ offsize_t sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+ struct drv_ver_info_stc drv_info;
+};
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
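+
+/* The nvm_cfg fields below use a MASK/OFFSET naming convention (OFFSET
+ * playing the role SHIFT plays elsewhere in this file). An illustrative
+ * extractor, not part of the HSI:
+ *
+ *	#define NVM_GET_FIELD(val, name) \
+ *		(((val) & name##_MASK) >> name##_OFFSET)
+ *
+ * e.g. NVM_GET_FIELD(glob->generic_cont0, NVM_CFG1_GLOB_MF_MODE)
+ */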
+
+/******************************************
+* nvm_cfg1 structs
+******************************************/
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0; /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+
+ u32 engineering_change[3]; /* 0x4 */
+
+ u32 manufacturing_id; /* 0x10 */
+
+ u32 serial_number[4]; /* 0x14 */
+
+ u32 pcie_cfg; /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+
+ u32 mgmt_traffic; /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
+#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+
+ u32 core_cfg; /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+
+ u32 e_lane_cfg1; /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 e_lane_cfg2; /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET 12
+#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+
+ u32 f_lane_cfg1; /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+
+ u32 f_lane_cfg2; /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+
+ u32 eagle_preemphasis; /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 eagle_driver_current; /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 falcon_preemphasis; /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+
+ u32 falcon_driver_current; /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+
+ u32 pci_id; /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+
+ u32 pci_subsys_id; /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+
+ u32 bar; /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+
+ u32 eagle_txfir_main; /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 eagle_txfir_post; /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 falcon_txfir_main; /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+
+ u32 falcon_txfir_post; /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+
+ u32 manufacture_ver; /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+
+ u32 manufacture_time; /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+
+ u32 led_global_settings; /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+
+ u32 generic_cont1; /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+
+ u32 mbi_version; /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date; /* 0x80 */
+
+ u32 misc_sig; /* 0x84 */
+
+ /* Define the GPIO mapping to switch i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+
+ u32 reserved[46]; /* 0x88 */
+};
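+
+/* Every field above is decoded with its matching _MASK/_OFFSET pair.
+ * Illustrative sketch, using the e_lane_cfg2 defines above (p_glob is
+ * a hypothetical pointer to a struct nvm_cfg1_glob):
+ *
+ *	u32 smbus_mode = (p_glob->e_lane_cfg2 &
+ *			  NVM_CFG1_GLOB_SMBUS_MODE_MASK) >>
+ *			 NVM_CFG1_GLOB_SMBUS_MODE_OFFSET;
+ */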
+
+struct nvm_cfg1_path {
+ u32 reserved[30]; /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+ u32 power_dissipated; /* 0x0 */
+#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24
+
+ u32 power_consumed; /* 0x4 */
+#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF
+#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0
+#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00
+#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8
+#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000
+#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16
+#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000
+#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24
+
+ u32 generic_cont0; /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
+#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+
+ u32 pcie_cfg; /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+
+ u32 features; /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+
+ u32 speed_cap_mask; /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
+
+ u32 link_settings; /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
+
+ u32 phy_cfg; /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10
+#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
+
+ u32 mgmt_traffic; /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2
+
+ u32 ext_phy; /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+
+ u32 mba_cfg1; /* 0x28 */
+#define NVM_CFG1_PORT_MBA_MASK 0x00000001
+#define NVM_CFG1_PORT_MBA_OFFSET 0
+#define NVM_CFG1_PORT_MBA_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_ENABLED 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED5_2K 0x1
+#define NVM_CFG1_PORT_RESERVED5_4K 0x2
+#define NVM_CFG1_PORT_RESERVED5_8K 0x3
+#define NVM_CFG1_PORT_RESERVED5_16K 0x4
+#define NVM_CFG1_PORT_RESERVED5_32K 0x5
+#define NVM_CFG1_PORT_RESERVED5_64K 0x6
+#define NVM_CFG1_PORT_RESERVED5_128K 0x7
+#define NVM_CFG1_PORT_RESERVED5_256K 0x8
+#define NVM_CFG1_PORT_RESERVED5_512K 0x9
+#define NVM_CFG1_PORT_RESERVED5_1M 0xA
+#define NVM_CFG1_PORT_RESERVED5_2M 0xB
+#define NVM_CFG1_PORT_RESERVED5_4M 0xC
+#define NVM_CFG1_PORT_RESERVED5_8M 0xD
+#define NVM_CFG1_PORT_RESERVED5_16M 0xE
+#define NVM_CFG1_PORT_RESERVED5_32M 0xF
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21
+
+ u32 mba_cfg2; /* 0x2C */
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0
+#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000
+#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16
+
+ u32 vf_cfg; /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0
+#define NVM_CFG1_PORT_RESERVED6_4K 0x1
+#define NVM_CFG1_PORT_RESERVED6_8K 0x2
+#define NVM_CFG1_PORT_RESERVED6_16K 0x3
+#define NVM_CFG1_PORT_RESERVED6_32K 0x4
+#define NVM_CFG1_PORT_RESERVED6_64K 0x5
+#define NVM_CFG1_PORT_RESERVED6_128K 0x6
+#define NVM_CFG1_PORT_RESERVED6_256K 0x7
+#define NVM_CFG1_PORT_RESERVED6_512K 0x8
+#define NVM_CFG1_PORT_RESERVED6_1M 0x9
+#define NVM_CFG1_PORT_RESERVED6_2M 0xA
+#define NVM_CFG1_PORT_RESERVED6_4M 0xB
+#define NVM_CFG1_PORT_RESERVED6_8M 0xC
+#define NVM_CFG1_PORT_RESERVED6_16M 0xD
+#define NVM_CFG1_PORT_RESERVED6_32M 0xE
+#define NVM_CFG1_PORT_RESERVED6_64M 0xF
+
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+
+ u32 led_port_settings; /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
+
+ u32 transceiver_00; /* 0x40 */
+
+	/* Define the GPIO mapping of the transceiver's module-absent signal */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ /* Define the GPIO mux settings to switch i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+
+ u32 reserved[133]; /* 0x44 */
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+
+ u32 rsrv1; /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+
+ u32 rsrv2; /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+
+ u32 device_id; /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16
+
+ u32 cmn_cfg; /* 0x14 */
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
+#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
+#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+
+ u32 pci_cfg; /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
+
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
+
+ u32 reserved[9]; /* 0x2C */
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob; /* 0x0 */
+
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+};
+
+/******************************************
+* nvm_cfg structs
+******************************************/
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
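+
+/* Illustrative sketch of what TO_OFFSIZE() produces, assuming
+ * OFFSIZE_OFFSET_SHIFT is 0 and OFFSIZE_SIZE_SHIFT is 16 (both defined
+ * elsewhere); offset and size are byte values packed at dword
+ * granularity:
+ *
+ *	TO_OFFSIZE(0x100, 0x40) == ((0x100 >> 2) << 0) |
+ *				   ((0x40 >> 2) << 16) == 0x00100040
+ */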
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define NVM_CRC_SIZE (sizeof(u32))
+
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+/****************************************************************************
+* Boot Strap Region *
+****************************************************************************/
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len; /* boot code length (in dwords) */
+ u32 code_start_addr;
+ u32 crc; /* 32-bit CRC */
+};
+
+/****************************************************************************
+* Directories Region *
+****************************************************************************/
+struct nvm_code_entry {
+ u32 image_type; /* Image type */
+ u32 nvm_start_addr; /* NVM address of the image */
+ u32 len; /* Include CRC */
+ u32 sram_start_addr;
+ u32 sram_run_addr; /* Relevant in case of MIM only */
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 200
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+
+#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ (_num_images - \
+ 1) * sizeof(struct nvm_code_entry) + \
+ NVM_CRC_SIZE)
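+
+/* Worked example (illustrative, assuming no structure padding):
+ * sizeof(struct nvm_dir) already covers one nvm_code_entry, so
+ * NVM_DIR_SIZE(3) == 32 + 2 * 20 + 4 == 76 bytes
+ * (32 for the header with one entry, 20 per extra entry, 4 for CRC).
+ */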
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ /* This array length depends on the number of VPD fields */
+ u8 vpd_data[1];
+};
+
+/****************************************************************************
+* NVRAM FULL MAP *
+****************************************************************************/
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4KB */
+#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2MB */
+#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 100KB */
+
+#define LIM_MAX_SIZE ((2 * \
+ FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) - \
+ NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
+ FPGA_MIM_MAX_SIZE)
+#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
+ ((idx == \
+ NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
+#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
+ MIM_MAX_SIZE(is_asic) * 2)
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+/* Address
+ * +-------------------+ 0x000000
+ * | Bootstrap: |
+ * | magic_number |
+ * | sram_start_addr |
+ * | code_len |
+ * | code_start_addr |
+ * | crc |
+ * +-------------------+ 0x000014
+ * | rsrv |
+ * +-------------------+ 0x000040
+ * | LIM |
+ * +-------------------+ 0x002000
+ * | Dir1 |
+ * +-------------------+ 0x003000
+ * | Dir2 |
+ * +-------------------+ 0x004000
+ * | MIM1 |
+ * +-------------------+ 0x130000
+ * | MIM2 |
+ * +-------------------+ 0x25C000
+ * | Rest Images: |
+ * | TIM1/2 |
+ * | MFW_TRACE1/2 |
+ * | Eagle/Falcon FW |
+ * | PCIE/AVS FW |
+ * | MBA/CCM/L2B |
+ * | VPD |
+ * | optic_modules |
+ * | ... |
+ * +-------------------+ 0x400000
+ */
+struct nvm_image {
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+ /* NVM Offset (size) */
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+
+ /* MIM1_IMAGE 0x004000 (0x12c000) */
+ /* MIM2_IMAGE 0x130000 (0x12c000) */
+/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
+}; /* 0x134 */
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
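+
+/* NVM_OFFSET() is the classic offsetof() idiom: it evaluates the byte
+ * offset of member 'f' within struct nvm_image at compile time. Per
+ * the map above, e.g. NVM_OFFSET(dir[0]) == 0x2000 and
+ * NVM_OFFSET(dir[1]) == 0x3000.
+ */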
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+
+	/* This array length depends on no_hw_sets */
+ struct hw_set_info hw_sets[1];
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644
index 000000000000..ffa99273b353
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -0,0 +1,776 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
+
+struct qed_ptt {
+ struct list_head list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+};
+
+struct qed_ptt_pool {
+ struct list_head free_list;
+ spinlock_t lock; /* ptt synchronized access */
+ struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
+ GFP_ATOMIC);
+ int i;
+
+ if (!p_pool)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+ if (i >= RESERVED_PTT_MAX)
+ list_add(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+ spin_lock_init(&p_pool->lock);
+
+ return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+ }
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ unsigned int i;
+
+	/* Take a free PTT from the list */
+ for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+ struct qed_ptt, list_entry);
+ list_del(&p_ptt->list_entry);
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+ return p_ptt;
+ }
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+ usleep_range(1000, 2000);
+ }
+
+ DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+ return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+ list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ /* The HW is using DWORDS and we need to translate it to Bytes */
+ return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+	/* Update PTT entry in admin window */
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+ /* The HW is using DWORDS and the address is in Bytes */
+ p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, offset),
+ le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
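+
+/* Worked example for the window check above (illustrative numbers):
+ * with the window currently mapping hw_addr 0x10000 and a per-window
+ * size of 0x1000, an access to 0x10010 stays inside the window
+ * (offset 0x10), while an access to 0x11000 or 0xf000 re-programs the
+ * window to the new address and uses offset 0.
+ */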
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, u32 val)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+ u32 val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+ return val;
+}
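+
+/* Typical access flow (illustrative sketch; GRC_DUMMY_ADDR is a
+ * placeholder register offset, not a real define):
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (p_ptt) {
+ *		u32 val = qed_rd(p_hwfn, p_ptt, GRC_DUMMY_ADDR);
+ *
+ *		qed_wr(p_hwfn, p_ptt, GRC_DUMMY_ADDR, val | 0x1);
+ *		qed_ptt_release(p_hwfn, p_ptt);
+ *	}
+ */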
+
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *addr,
+ u32 hw_addr,
+ size_t n,
+ bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ size_t quota, done = 0;
+ u32 __iomem *reg_addr;
+
+ while (done < n) {
+ quota = min_t(size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+ done += quota;
+ }
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest, u32 hw_addr, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, void *src, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+	/* Every pretend undoes previous pretends, including
+ * previous port pretend.
+ */
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
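+
+/* Illustrative sketch: a function pretend can only be cancelled by
+ * pretending back to the original fid ('my_fid' and 'other_fid' are
+ * placeholders):
+ *
+ *	qed_fid_pretend(p_hwfn, p_ptt, other_fid);
+ *	... GRC accesses through p_ptt are now made as other_fid ...
+ *	qed_fid_pretend(p_hwfn, p_ptt, my_fid);
+ */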
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+/* DMAE */
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct qed_dmae_params *p_params)
+{
+ u32 opcode = 0;
+ u16 opcodeB = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+ : DMAE_CMD_SRC_MASK_PCIE) <<
+ DMAE_CMD_SRC_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT);
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+ : DMAE_CMD_DST_MASK_PCIE) <<
+ DMAE_CMD_DST_SHIFT;
+ opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT);
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+ opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+
+ opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+
+ opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+
+ /* reset source address in next go */
+ opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+ DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+ /* reset dest address in next go */
+ opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
+ DMAE_CMD_DST_ADDR_RESET_SHIFT);
+
+ opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT);
+
+ opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
+ DMAE_CMD_DST_VF_ID_SHIFT);
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static int
+qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ int qed_status = 0;
+
+	/* verify the source and destination addresses are not zero */
+ if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
+ ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+ DP_NOTICE(p_hwfn,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(command->opcode),
+ le16_to_cpu(command->opcode_b),
+ le16_to_cpu(command->length),
+ le32_to_cpu(command->src_addr_hi),
+ le32_to_cpu(command->src_addr_lo),
+ le32_to_cpu(command->dst_addr_hi),
+ le32_to_cpu(command->dst_addr_lo));
+
+	/* Copy the command to DMAE - this must be done before every call,
+	 * since the source/dest addresses are not reset between calls.
+	 * The first 9 DWs are the command registers, the 10th DW is the
+	 * GO register, and the rest are result registers
+	 * (which are read only by the client).
+	 */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)command) + i) : 0;
+
+ qed_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ qed_dmae_idx_to_go_cmd(idx_cmd),
+ DMAE_GO_VALUE);
+
+ return qed_status;
+}
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_addr,
+ GFP_KERNEL);
+ if (!*p_comp) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_addr, GFP_KERNEL);
+ if (!*p_cmd) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+ goto err;
+ }
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_addr, GFP_KERNEL);
+ if (!*p_buff) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+ goto err;
+ }
+
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+ return 0;
+err:
+ qed_dmae_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+	/* Make sure no one is using the DMAE resources while we free them */
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ if (p_hwfn->dmae_info.p_completion_word) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_hwfn->dmae_info.p_completion_word,
+ p_phys);
+ p_hwfn->dmae_info.p_completion_word = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_hwfn->dmae_info.p_dmae_cmd,
+ p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys);
+ p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+ }
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+ u32 wait_cnt = 0;
+ u32 wait_cnt_limit = 10000;
+
+ int qed_status = 0;
+
+ barrier();
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ udelay(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ qed_status = -EBUSY;
+ break;
+ }
+
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ barrier();
+ }
+
+ if (qed_status == 0)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return qed_status;
+}
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 length)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ int qed_status = 0;
+
+ switch (src_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+ break;
+ /* for virtual source addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(uintptr_t)src_addr,
+ length * sizeof(u32));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dst_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+ break;
+	/* for virtual destination addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd->length = cpu_to_le16((u16)length);
+
+ qed_dmae_post_command(p_hwfn, p_ptt);
+
+ qed_status = qed_dmae_operation_wait(p_hwfn);
+
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
+ src_addr,
+ dst_addr,
+ length);
+ return qed_status;
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+ memcpy((void *)(uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length * sizeof(u32));
+
+ return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr, u64 dst_addr,
+ u8 src_type, u8 dst_type,
+ u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ int qed_status = 0;
+ u32 offset = 0;
+
+ qed_dmae_opcode(p_hwfn,
+ (src_type == QED_DMAE_ADDRESS_GRC),
+ (dst_type == QED_DMAE_ADDRESS_GRC),
+ p_params);
+
+ cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+	/* Split the copy into chunks of at most DMAE_MAX_RW_SIZE dwords */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
+ if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+ if (src_type == QED_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status,
+ src_addr,
+ dst_addr,
+ length_cur);
+ break;
+ }
+ }
+
+ return qed_status;
+}
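+
+/* Worked example of the split above (illustrative, assuming
+ * DMAE_MAX_RW_SIZE is 0x2000 dwords): a copy of 0x2200 dwords gives
+ * cnt_split = 1 and length_mod = 0x200, so the loop issues one
+ * 0x2000-dword sub-operation followed by one of 0x200 dwords.
+ */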
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct qed_dmae_params params;
+ int rc;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = flags;
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_GRC,
+ size_in_dwords, &params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
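+
+/* Illustrative usage sketch ('buf' and GRC_SOME_ADDR are
+ * placeholders, not real driver symbols):
+ *
+ *	u32 buf[4] = { 0 };
+ *	int rc;
+ *
+ *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
+ *			       GRC_SOME_ADDR, ARRAY_SIZE(buf), 0);
+ */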
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *p_params)
+{
+ u16 pq_id = 0;
+
+ if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
+ !p_params) {
+ DP_NOTICE(p_hwfn,
+ "Protocol %d received NULL PQ params\n",
+ proto);
+ return 0;
+ }
+
+ switch (proto) {
+ case PROTOCOLID_CORE:
+ if (p_params->core.tc == LB_TC)
+ pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
+ case PROTOCOLID_ETH:
+ pq_id = p_params->eth.tc;
+ break;
+ default:
+ pq_id = 0;
+ }
+
+ pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
+
+ return pq_id;
+}
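+
+/* Illustrative usage sketch (assumes union qed_qm_pq_params exposes
+ * the 'eth.tc' member used above):
+ *
+ *	union qed_qm_pq_params pq_params;
+ *	u16 pq;
+ *
+ *	memset(&pq_params, 0, sizeof(pq_params));
+ *	pq_params.eth.tc = 0;
+ *	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ */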
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644
index 000000000000..e56d433793be
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -0,0 +1,263 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+
+#define DMAE_CMD_SIZE 14
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
+
+/**
+ * @brief qed_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_free - Free the PTT pool
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param new_hw_addr
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr);
+
+/**
+ * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * @brief qed_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
+
+/**
+ * @brief qed_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr);
+
+/**
+ * @brief qed_memcpy_from - copy n bytes from BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ size_t n);
+
+/**
+ * @brief qed_memcpy_to - copy n bytes to BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ size_t n);
+
+/**
+ * @brief qed_fid_pretend - pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ * either pf / vf, port/path fields are don't care.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid);
+
+/**
+ * @brief qed_port_pretend - pretend to another port when
+ * accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id);
+
+/**
+ * @brief qed_port_unpretend - cancel any previously set port
+ * pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_dmae_idx_to_go_cmd - map the idx to the DMAE GO command.
+ * Declared here since other files require it.
+ *
+ * @param idx
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * @brief qed_dmae_info_alloc - Init the dmae_info structure
+ * which is part of p_hwfn.
+ * @param p_hwfn
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_dmae_info_free - Free the dmae_info structure
+ * which is part of p_hwfn
+ *
+ * @param p_hwfn
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+ struct {
+ u8 tc;
+ } core;
+
+ struct {
+ u8 is_vf;
+ u8 vf_id;
+ u8 tc;
+ } eth;
+};
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union qed_qm_pq_params *params);
+
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *fw_data);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644
index 000000000000..0b21a553cc7d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -0,0 +1,798 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+enum cminterface {
+ MCM_SEC,
+ MCM_PRI,
+ UCM_SEC,
+ UCM_PRI,
+ TCM_SEC,
+ TCM_PRI,
+ YCM_SEC,
+ YCM_PRI,
+ XCM_SEC,
+ XCM_PRI,
+ NUM_OF_CM_INTERFACES
+};
+
+/* general constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, \
+ 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
+ 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID 0xffff
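+/* e.g. QM_PQ_MEM_4KB(511) = DIV_ROUND_UP(512 * 4, 0x1000) = 1 page and
+ * QM_PQ_SIZE_256B(511) = DIV_ROUND_UP(511, 0x100) - 1 = 1; both macros
+ * evaluate to 0 for an empty PQ.
+ */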
+/* feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND 6250000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL 4375000
+#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
+/* RL constants */
+#define QM_RL_UPPER_BOUND 6250000
+#define QM_RL_PERIOD 5 /* in us */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate) max_t(u32, \
+ (((rate ? rate : 1000000) \
+ * QM_RL_PERIOD) / 8), 1)
+#define QM_RL_MAX_INC_VAL 4375000
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define EAGLE_WORKAROUND_TC 7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES 150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+ ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS 38
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_EAGLE_WORKAROUND_BLOCKS 4
+#define BTB_PURE_LB_FACTOR 10
+#define BTB_PURE_LB_RATIO 7
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 0x2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+ SET_FIELD(var[cmd ## _ ## field ## _OFFSET], cmd ## _ ## field, value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) \
+ ((port) * (max_phy_tcs_pr_port) + (tc))
+#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phy_tcs_pr_port) \
+ ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) : LB_VOQ(port))
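+/* e.g. assuming LB_TC > 2 and max_phy_tcs_pr_port = 4,
+ * VOQ(1, 2, 4) = PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = 6, while a loopback TC
+ * maps to LB_VOQ(1) = MAX_PHYS_VOQS + 1.
+ */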
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
+ bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ /* enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (1 << MAX_NUM_VOQS) - 1);
+ /* write RL period */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIOD_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
+ bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
+ bool vport_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ vport_rl_en ? 1 : 0);
+ if (vport_rl_en) {
+ /* write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
+ bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 voq,
+ u16 cmdq_lines)
+{
+ u32 qm_line_crd;
+
+ /* In A0, limit the size of the pbf queue so that only 511 commands
+ * with the minimum size of 4 (FCoE minimum size) can fit.
+ */
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+
+ if (is_bb_a0)
+ cmdq_lines = min_t(u32, cmdq_lines, 1022);
+ qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void qed_cmdq_lines_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u8 tc, voq, port_id;
+
+ /* clear PBF lines for all VOQs */
+ for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (port_params[port_id].active) {
+ u16 phys_lines, phys_lines_per_tc;
+ u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* find #lines to divide between the active
+ * physical TCs.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+ /* find #lines per active physical TC */
+ phys_lines_per_tc = phys_lines / phys_tcs;
+ /* init registers per active TC */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
+ }
+ /* init registers for pure LB TC */
+ qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+ }
+}
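+
+/* Worked example: a port with num_pbf_cmd_lines = 3400 and 4 active
+ * physical TCs gets (3400 - 150) / 4 = 812 command lines per physical TC,
+ * plus the fixed PBF_CMDQ_PURE_LB_LINES = 150 for its pure LB VOQ.
+ */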
+
+static void qed_btb_blocks_rt_init(
+ struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ u32 temp;
+ u8 phys_tcs;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+ /* subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* find blocks per physical TC. use factor to avoid
+ * floating-point arithmetic.
+ */
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (phys_tcs * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks / BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+
+ /* init physical TCs */
+ for (tc = 0; tc < phys_tcs; tc++) {
+ voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ phys_blocks);
+ }
+
+ /* init pure LB TC */
+ temp = LB_VOQ(port_id);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+ pure_lb_blocks);
+ }
+}
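+
+/* Worked example of the fixed-point math: usable_blocks = 1000 and 4
+ * physical TCs give pure_lb_blocks = (1000 * 10) / (4 * 10 + 7) = 212,
+ * then max(38, 212 / 10) = 38 guaranteed LB blocks, leaving
+ * (1000 - 38) / 4 = 240 blocks per physical TC.
+ */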
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void qed_tx_pq_map_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+ bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+ u16 i, pq_id, pq_group;
+
+ /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(p_params->pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+ /* go over all Tx PQs */
+ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ bool is_vf_pq = (i >= p_params->num_pf_pqs);
+ struct qm_rf_pq_map tx_pq_map;
+
+ /* update first Tx PQ of VPORT/TC */
+ u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
+ p_params->start_vport;
+ u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
+ u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ /* create new VP PQ */
+ pq_ids[p_params->pq_params[i].tc_id] = pq_id;
+ first_tx_pq_id = pq_id;
+ /* map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPMAP_RT_OFFSET +
+ first_tx_pq_id,
+ (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (p_params->pf_id <<
+ QM_WFQ_VP_PQ_PF_SHIFT));
+ }
+ /* fill PQ map entry */
+ memset(&tx_pq_map, 0, sizeof(tx_pq_map));
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+ is_vf_pq ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+ is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+ p_params->pq_params[i].wrr_group);
+ /* write PQ map entry to CAM */
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+ *((u32 *)&tx_pq_map));
+ /* set base address */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ /* check if VF PQ */
+ if (is_vf_pq) {
+ /* if PQ is associated with a VF, add indication
+ * to PQ VF mask
+ */
+ tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+ (1 << (pq_id % tx_pq_vf_mask_width));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+
+ /* store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++) {
+ if (tx_pq_vf_mask[i]) {
+ if (is_bb_a0) {
+ u32 curr_mask = 0, addr;
+
+ addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
+ if (!p_params->is_first_pf)
+ curr_mask = qed_rd(p_hwfn, p_ptt,
+ addr);
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+
+ STORE_RT_REG(p_hwfn, addr,
+ curr_mask | tx_pq_vf_mask[i]);
+ } else {
+ u32 addr;
+
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+ STORE_RT_REG(p_hwfn, addr,
+ tx_pq_vf_mask[i]);
+ }
+ }
+ }
+}
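+
+/* Example of the VF-mask bookkeeping above: on non-A0 parts the mask
+ * width is QM_PF_QUEUE_GROUP_SIZE, so a VF PQ sets bit (pq_id % width)
+ * in word (pq_id / width), and every nonzero word is stored at
+ * QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + its word index.
+ */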
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
+{
+ u16 i, pq_id;
+
+ /* a single other PQ group is used in each PF,
+ * where PQ group i is used in PF i.
+ */
+ u16 pq_group = pf_id;
+ u32 pq_size = num_pf_cids + num_tids;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+
+ /* map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+ /* set base address */
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ u32 crd_reg_offset;
+ u32 inc_val;
+ u16 i;
+
+ if (p_params->pf_id < MAX_NUM_PFS_BB)
+ crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
+ else
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
+
+ inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+
+ for (i = 0; i < num_tx_pqs; i++) {
+ u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+
+ OVERWRITE_RT_REG(p_hwfn,
+ crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+ return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 tc, i, vport_id;
+ u32 inc_val;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
+ u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+
+ /* each VPORT can have several VPORT PQ IDs for
+ * different TCs
+ */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = pq_ids[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
+ STORE_RT_REG(p_hwfn, temp + vport_pq_id,
+ QM_WFQ_UPPER_BOUND |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ QM_WFQ_INIT_CRD(inc_val) |
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 i, vport_id;
+
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT rate-limit configuration");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 reg_val, i;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ i++) {
+ udelay(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+
+ /* check if timeout while waiting for SDM command ready */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Timeout when waiting for QM SDM command ready signal\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
+{
+ if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+ return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
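+
+/* Worked example: num_pf_cids = 4096, num_vf_cids = 1024, num_tids = 2048,
+ * num_pf_pqs = 16 and num_vf_pqs = 8 give 5 * 16 + 2 * 8 + 7 * 4 = 124
+ * pages of 4KB (QM_PQ_MEM_4KB(4096) = 5, QM_PQ_MEM_4KB(1024) = 2 and
+ * QM_PQ_MEM_4KB(6144) = 7).
+ */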
+
+int qed_qm_common_rt_init(
+ struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
+{
+ /* init AFullOprtnstcCrdMask */
+ u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (p_params->pf_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (p_params->vport_wfq_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (p_params->pf_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (p_params->vport_rl_en <<
+ QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+ qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+ qed_cmdq_lines_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ qed_btb_blocks_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+ return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+ p_params->num_tids) *
+ QM_OTHER_PQS_PER_PF;
+ u8 tc, i;
+
+ /* clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < p_params->num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+ /* map Other PQs (if any) */
+ qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
+ p_params->num_pf_cids, p_params->num_tids, 0);
+
+ /* map Tx PQs */
+ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+
+ if (p_params->pf_wfq)
+ if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+ return -1;
+
+ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+ return -1;
+
+ if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
+ p_params->num_vports, vport_params))
+ return -1;
+
+ return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLPFCRD + pf_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(vport_rl);
+
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + vport_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+ return 0;
+}
+
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+
+ /* set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+ /* if last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+ GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+ cmd_arr[0], cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+
+ return true;
+}
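+
+/* Example: stopping PQs 30..40 spans two 32-bit mask words, so the loop
+ * above sends one command for GROUP_ID 0 (bits 30-31 set) and one for
+ * GROUP_ID 1 (bits 0-8 set); a release command sends the same groups
+ * with an empty PAUSE_MASK.
+ */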
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644
index 000000000000..796f1390e598
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -0,0 +1,531 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+ cdev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+ p_hwfn->rt_data[i].b_valid = false;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val)
+{
+ p_hwfn->rt_data[rt_offset].init_val = val;
+ p_hwfn->rt_data[rt_offset].b_valid = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data[rt_offset + i].init_val = val[i];
+ p_hwfn->rt_data[rt_offset + i].b_valid = true;
+ }
+}
+
+static void qed_init_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 rt_offset,
+ u32 size)
+{
+ struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++) {
+ if (!rt_data[i].b_valid)
+ continue;
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+ }
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rt_data *rt_data;
+
+ rt_data = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(*rt_data), GFP_ATOMIC);
+ if (!rt_data)
+ return -ENOMEM;
+
+ p_hwfn->rt_data = rt_data;
+
+ return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->rt_data);
+ p_hwfn->rt_data = NULL;
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size,
+ const u32 *buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ int rc = 0;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+ const u32 *data = buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(buf + dmae_data_offset),
+ addr, size, 0);
+ }
+
+ return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+ memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+ /* invoke the DMAE virtual/physical buffer API with
+ * 1. DMAE init channel
+ * 2. addr,
+ * 3. zero_buffer (the zeroed source to replicate),
+ * 4. fill_count
+ */
+
+ return qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(&zero_buffer[0]),
+ addr, fill_count,
+ QED_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 fill,
+ u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+ u32 offset, output_len, input_len, max_size;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ int rc = 0;
+ u32 size;
+
+ array_data = cdev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)(array_data +
+ dmae_array_offset);
+ data = le32_to_cpu(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data,
+ INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ memset(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = qed_unzip_data(p_hwfn, input_len,
+ (u8 *)&array_data[offset],
+ max_size, (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+ rc = -EINVAL;
+ }
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ union init_write_args *arg = &cmd->args;
+ int rc = 0;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn,
+ "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+ addr);
+ return -EINVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ qed_wr(p_hwfn, p_ptt, addr,
+ le32_to_cpu(arg->inline_val));
+ break;
+ case INIT_SRC_ZEROS:
+ if (b_must_dmae ||
+ (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ else
+ qed_init_fill(p_hwfn, p_ptt, addr, 0,
+ le32_to_cpu(arg->zeros_count));
+ break;
+ case INIT_SRC_ARRAY:
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ qed_init_rt(p_hwfn, p_ptt, addr,
+ le16_to_cpu(arg->runtime.offset),
+ le16_to_cpu(arg->runtime.size));
+ break;
+ }
+
+ return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+ return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_read_op *cmd)
+{
+ u32 data = le32_to_cpu(cmd->op_data);
+ u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+
+ bool (*comp_check)(u32 val,
+ u32 expected_val);
+ u32 delay = QED_INIT_POLL_PERIOD_US, val;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ data = le32_to_cpu(cmd->op_data);
+ if (GET_FIELD(data, INIT_READ_OP_POLL)) {
+ int i;
+
+ switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
+ case INIT_COMPARISON_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_COMPARISON_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_COMPARISON_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ comp_check = NULL;
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ data);
+ return;
+ }
+
+ for (i = 0;
+ i < QED_INIT_MAX_POLL_COUNT &&
+ !comp_check(val, le32_to_cpu(cmd->expected_val));
+ i++) {
+ udelay(delay);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == QED_INIT_MAX_POLL_COUNT)
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+ addr, le32_to_cpu(cmd->expected_val),
+ val, data);
+ }
+}
+
+/* init_ops callbacks entry point */
+static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+}
+
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+ u16 *offset,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
+ modes_tree_buf = cdev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & (1 << tree_val)) ? 1 : 0;
+ }
+}
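+
+/* The modes tree is a prefix-encoded boolean expression. For example the
+ * byte sequence { INIT_MODE_OP_AND, mode_a, INIT_MODE_OP_NOT, mode_b },
+ * with the mode bytes biased by MAX_INIT_MODE_OPS, consumes four bytes
+ * and evaluates "mode_a && !mode_b" against the 'modes' bitmap.
+ */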
+
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd,
+ int modes)
+{
+ u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+ if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
+ struct init_if_phase_op *p_cmd,
+ u32 phase,
+ u32 phase_id)
+{
+ u32 data = le32_to_cpu(p_cmd->phase_data);
+ u32 op_data = le32_to_cpu(p_cmd->op_data);
+
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ bool b_dmae = false;
+ int rc = 0;
+
+ num_init_ops = cdev->fw_data->init_ops_size;
+ init_ops = cdev->fw_data->init_ops;
+
+ p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+ if (!p_hwfn->unzip_buf) {
+ DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+ return -ENOMEM;
+ }
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = le32_to_cpu(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+ case INIT_OP_READ:
+ qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+ case INIT_OP_IF_MODE:
+ cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ phase, phase_id);
+ b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+ break;
+ case INIT_OP_DELAY:
+ /* qed_init_run is always invoked from
+ * sleep-able context
+ */
+ udelay(le32_to_cpu(cmd->delay.delay));
+ break;
+
+ case INIT_OP_CALLBACK:
+ qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ kfree(p_hwfn->unzip_buf);
+ return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 gtt_base;
+ u32 i;
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *data)
+{
+ struct qed_fw_data *fw = cdev->fw_data;
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!data) {
+ DP_NOTICE(cdev, "Invalid fw data\n");
+ return -EINVAL;
+ }
+
+ buf_hdr = (struct bin_buffer_hdr *)data;
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+ return 0;
+}
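+
+/* The firmware blob parsed above starts with an array of
+ * struct bin_buffer_hdr entries: BIN_BUF_INIT_CMD locates the init_op
+ * stream, BIN_BUF_INIT_VAL the dword array data and
+ * BIN_BUF_INIT_MODE_TREE the modes tree, each via its 'offset' from the
+ * start of the blob.
+ */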
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644
index 000000000000..1e832049983d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -0,0 +1,110 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * @brief qed_init_iro_array - init iro_arr.
+ *
+ *
+ * @param cdev
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * @brief qed_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return int
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes);
+
+/**
+ * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_hwfn_deallocate
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+/**
+ * @brief
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+/**
+ * @brief
+ * Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644
index 000000000000..2e399b6137a2
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -0,0 +1,1134 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+struct qed_pi_info {
+ qed_int_comp_cb_t comp_cb;
+ void *cookie;
+};
+
+struct qed_sb_sp_info {
+ struct qed_sb_info sb_info;
+
+ /* per protocol index data */
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct qed_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* Previously asserted attentions, which are still unasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+ struct qed_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0;
+ u16 index;
+
+ /* Make certain the HW write took effect */
+ mmiowb();
+
+ index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = QED_SB_ATT_IDX;
+ }
+
+ /* Make certain we got a consistent view with HW */
+ mmiowb();
+
+ return rc;
+}
+
+/**
+ * @brief qed_int_assertion - handles asserted attention bits
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return int
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn,
+ u16 asserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return 0;
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return int
+ *
+ */
+static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_mask;
+
+ if (deasserted_bits != 0x100)
+ DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+
+ /* Clear IGU indication for the deasserted bits */
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return 0;
+}
+
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u32 attn_bits = 0, attn_acks = 0;
+ u16 asserted_bits, deasserted_bits;
+ __le16 index;
+ int rc = 0;
+
+ /* Read current attention bits/acks - safeguard against attentions
+ * by guaranteeing work on a synchronized timeframe
+ */
+ do {
+ index = p_sb_attn->sb_index;
+ attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+ attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+ } while (index != p_sb_attn->sb_index);
+ p_sb_attn->sb_index = index;
+
+ /* Attention / Deassertion are meaningful (and in correct state)
+ * only when they differ and consistent with known state - deassertion
+ * when previous attention & current ack, and assertion when current
+ * attention with no previous attention
+ */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ } else if (asserted_bits == 0x100) {
+ DP_INFO(p_hwfn,
+ "MFW indication via attention\n");
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication [deassertion]\n");
+ }
+
+ if (asserted_bits) {
+ rc = qed_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits) {
+ rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
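+
+/* Example of the state logic above: bit 8 (0x100) set in attn_bits but
+ * not in attn_acks or known_attn is reported as newly asserted; once the
+ * bit clears in attn_bits and its ack is set while still in known_attn,
+ * it is reported as deasserted and dropped from known_attn.
+ */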
+
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+ void __iomem *igu_addr,
+ u32 ack_cons)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+
+ /* Both segments (interrupts & acks) are written to same place address;
+ * Need to guarantee all commands will be received (in-order) by HW.
+ */
+ mmiowb();
+ barrier();
+}
+
+void qed_int_sp_dpc(unsigned long hwfn_cookie)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+ struct qed_pi_info *pi_info = NULL;
+ struct qed_sb_attn_info *sb_attn;
+ struct qed_sb_info *sb_info;
+ int arr_size;
+ u16 rc = 0;
+
+ if (!p_hwfn) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no hwfn!\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->cdev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+ /* Disable ack for def status block. Required both for msix and
+ * inta in non-mask mode; in inta it does no harm.
+ */
+ qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(
+ p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+
+ rc = qed_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(
+ p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+ /* Check if we expect interrupts at this time. if not just ack them */
+ if (!(rc & QED_SB_EVENT_MASK)) {
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ /* Check the validity of the DPC ptt. If not ack interrupts and fail */
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & QED_SB_ATT_IDX)
+ qed_int_attentions(p_hwfn);
+
+ if (rc & QED_SB_IDX) {
+ int pi;
+
+ /* Invoke any registered protocol-index completion callbacks */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & QED_SB_ATT_IDX))
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (p_sb) {
+ if (p_sb->sb_attn)
+ dma_free_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn,
+ p_sb->sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the address of cleanup for the mcp attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb;
+ void *p_virt;
+ dma_addr_t p_phys = 0;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ u32 cau_state;
+
+ memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+ /* setting the time resolution to a fixed value (= 1) */
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+ QED_CAU_DEF_RX_TIMER_RES);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+ QED_CAU_DEF_TX_TIMER_RES);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!p_hwfn->cdev->rx_coalesce_usecs)
+ p_hwfn->cdev->rx_coalesce_usecs =
+ QED_CAU_DEF_RX_USECS;
+ if (!p_hwfn->cdev->tx_coalesce_usecs)
+ p_hwfn->cdev->tx_coalesce_usecs =
+ QED_CAU_DEF_TX_USECS;
+ }
+
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
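+
+/* With the fixed timer resolution of 1 noted above, the timeset used by
+ * qed_int_cau_conf_sb() is usecs >> 2, so the 24/48 usec Rx/Tx defaults
+ * yield timesets 6 and 12, and timeset << (timer_res + 1) reproduces the
+ * 24 and 48 usec timeouts.
+ */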
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+ u32 val;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
+ upper_32_bits(sb_phys));
+
+ val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
+ qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
+ qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
+ (QED_CAU_DEF_RX_TIMER_RES + 1);
+ u8 num_tc = 1, i;
+
+ qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ QED_COAL_RX_STATE_MACHINE,
+ timeset);
+
+ timeset = p_hwfn->cdev->tx_coalesce_usecs >>
+ (QED_CAU_DEF_TX_TIMER_RES + 1);
+
+ for (i = 0; i < num_tc; i++) {
+ qed_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ QED_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset;
+ u32 pi_offset;
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
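+
+/* Worked example (a sketch, assuming PIS_PER_SB == 12 as in the HSI
+ * headers): for igu_sb_id = 5 and pi_index = 3, pi_offset is
+ * 5 * 12 + 3 = 63, i.e. the 64th 32-bit entry in CAU PI memory.
+ */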
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief qed_get_igu_sb_id - given a sw sb_id, return the
+ * matching igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ u16 igu_sb_id;
+
+ /* Assuming a contiguous set of IGU SBs dedicated to this PF */
+ if (sb_id == QED_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else
+ igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
+ (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+
+ return igu_sb_id;
+}
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id != QED_SP_SB_ID) {
+ p_hwfn->sbs_info[sb_id] = sb_info;
+ p_hwfn->num_sbs++;
+ }
+
+ sb_info->cdev = p_hwfn->cdev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+
+ sb_info->flags |= QED_SB_INFO_INIT;
+
+ qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ if (sb_id == QED_SP_SB_ID) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return -EINVAL;
+ }
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ p_hwfn->sbs_info[sb_id] = NULL;
+ p_hwfn->num_sbs--;
+
+ return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (p_sb) {
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
+ }
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ if (!p_sb) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+ return -ENOMEM;
+ }
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+ p_phys, QED_SP_SB_ID);
+
+ memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return 0;
+}
+
+static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ if (!p_hwfn)
+ return;
+
+ if (p_hwfn->p_sp_sb)
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup Slow path status block - NULL pointer\n");
+
+ if (p_hwfn->p_sb_attn)
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+ else
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to setup attentions status block - NULL pointer\n");
+}
+
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ qed_status = 0;
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int qed_status = -ENOMEM;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb) {
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
+ qed_status = 0;
+ }
+
+ return qed_status;
+}
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+ p_hwfn->cdev->int_mode = int_mode;
+ switch (p_hwfn->cdev->int_mode) {
+ case QED_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case QED_INT_MODE_POLL:
+ break;
+ }
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
+{
+ int i;
+
+ p_hwfn->b_int_enabled = 1;
+
+ /* Mask non-link attentions */
+ for (i = 0; i < 9; i++)
+ qed_wr(p_hwfn, p_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+
+ /* Enable interrupt Generation */
+ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+ /* Configure AEU signal change to produce attentions for link */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+
+ /* Flush the writes to IGU */
+ mmiowb();
+
+ /* Unmask AEU signals toward IGU */
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid)
+{
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+ u32 data = 0;
+ u32 cmd_ctrl = 0;
+ u32 val = 0;
+ u32 sb_bit = 0;
+ u32 sb_bit_addr = 0;
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ barrier();
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ /* Flush the write to IGU */
+ mmiowb();
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (sb_id % 32);
+ sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
+
+ /* Now wait for the command to complete */
+ do {
+ val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+
+ usleep_range(5000, 10000);
+ } while (--sleep_cnt);
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, sb_id);
+}
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set)
+{
+ int pi;
+
+ /* Set */
+ if (b_set)
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+ /* Clear */
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+ /* Clear the CAU for the SB */
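+ /* The literal 12 presumably mirrors PIS_PER_SB used in
+ * qed_int_cau_conf_pi() above; each PI entry is 4 bytes wide.
+ */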
+ for (pi = 0; pi < 12; pi++)
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath)
+{
+ u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+ u32 sb_id = 0;
+ u32 val = 0;
+
+ val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning SBs [%d,...,%d]\n",
+ igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+ for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+
+ if (b_slowpath) {
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+ }
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *blk;
+ u32 val;
+ u16 sb_id;
+ u16 prev_sb_id = 0xFF;
+
+ p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+
+ if (!p_hwfn->hw_info.p_igu_info)
+ return -ENOMEM;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Initialize base sb / sb cnt for PFs */
+ p_igu_info->igu_base_sb = 0xffff;
+ p_igu_info->igu_sb_cnt = 0;
+ p_igu_info->igu_dsb_id = 0xffff;
+ p_igu_info->igu_base_sb_iov = 0xffff;
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+
+ /* stop scanning when hit first invalid PF entry */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ break;
+
+ blk->status = QED_IGU_STATUS_VALID;
+ blk->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ blk->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
+ val, blk->function_id, blk->is_pf,
+ blk->vector_number);
+
+ if (blk->is_pf) {
+ if (blk->function_id == p_hwfn->rel_pf_id) {
+ blk->status |= QED_IGU_STATUS_PF;
+
+ if (blk->vector_number == 0) {
+ if (p_igu_info->igu_dsb_id == 0xffff)
+ p_igu_info->igu_dsb_id = sb_id;
+ } else {
+ if (p_igu_info->igu_base_sb ==
+ 0xffff) {
+ p_igu_info->igu_base_sb = sb_id;
+ } else if (prev_sb_id != sb_id - 1) {
+ DP_NOTICE(p_hwfn->cdev,
+ "consecutive igu vectors for HWFN %x broken",
+ p_hwfn->rel_pf_id);
+ break;
+ }
+ prev_sb_id = sb_id;
+ /* we don't count the default */
+ (p_igu_info->igu_sb_cnt)++;
+ }
+ }
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+
+ if (p_igu_info->igu_base_sb == 0xffff ||
+ p_igu_info->igu_dsb_id == 0xffff ||
+ p_igu_info->igu_sb_cnt == 0) {
+ DP_NOTICE(p_hwfn,
+ "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = 0;
+
+ igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+ u64 intr_status = 0;
+ u32 intr_status_lo = 0;
+ u32 intr_status_hi = 0;
+ u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+ u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ lsb_igu_cmd_addr * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ msb_igu_cmd_addr * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+ tasklet_init(p_hwfn->sp_dpc,
+ qed_int_sp_dpc, (unsigned long)p_hwfn);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+ if (!p_hwfn->sp_dpc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->sp_dpc);
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ int rc = 0;
+
+ rc = qed_int_sp_dpc_alloc(p_hwfn);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+ return rc;
+ }
+ rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+ return rc;
+ }
+ rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+ if (rc) {
+ DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
+ return rc;
+ }
+ return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+ qed_int_sp_sb_free(p_hwfn);
+ qed_int_sb_attn_free(p_hwfn);
+ qed_int_sp_dpc_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ qed_int_sp_sb_setup(p_hwfn, p_ptt);
+ qed_int_sp_dpc_setup(p_hwfn);
+}
+
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks)
+{
+ struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+ if (!info)
+ return 0;
+
+ if (p_iov_blks)
+ *p_iov_blks = info->free_blks;
+
+ return info->igu_sb_cnt;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644
index 000000000000..16b57518e706
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -0,0 +1,391 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+
+/* IGU control commands */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
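+
+/* A minimal sketch of composing ctrl_data from the fields above,
+ * mirroring the qed_int_igu_cleanup_sb() usage in qed_int.c (the
+ * local names here are illustrative only):
+ *
+ * u32 cmd_ctrl = 0;
+ *
+ * SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ * SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ * SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+ */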
+
+enum qed_coalescing_fsm {
+ QED_COAL_RX_STATE_MACHINE,
+ QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief qed_int_cau_conf_pi - configure cau for a given
+ * status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param coalescing_fsm
+ * @param timeset
+ */
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset);
+
+/**
+ * @brief qed_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief qed_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ * register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * @brief qed_int_sb_init - Initializes the sb_info structure.
+ *
+ * once the structure is initialized, it can be passed to sb-related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info points to an uninitialized (but
+ * allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should use QED_SP_SB_ID for SP Status block
+ *
+ * @return int
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id);
+/**
+ * @brief qed_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info initialized sb_info structure
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info);
+
+/**
+ * @brief qed_int_sb_release - releases the sb_info structure.
+ *
+ * once the structure is released, its memory can be freed
+ *
+ * @param p_hwfn
+ * @param sb_info points to an allocated sb_info structure
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block)
+ *
+ * @return int
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @param hwfn_cookie - pointer to the hwfn, cast to an unsigned long
+ *
+ */
+void qed_int_sp_dpc(unsigned long hwfn_cookie);
+
+/**
+ * @brief qed_int_get_num_sbs - get the number of status
+ * blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_iov_blks - configured free blks for vfs
+ *
+ * @return int - number of status blocks configured
+ */
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ int *p_iov_blks);
+
+/**
+ * @file
+ *
+ * @brief Interrupt handler
+ */
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX 0x0001
+#define QED_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct qed_igu_block {
+ u8 status;
+#define QED_IGU_STATUS_FREE 0x01
+#define QED_IGU_STATUS_VALID 0x02
+#define QED_IGU_STATUS_PF 0x04
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+};
+
+struct qed_igu_map {
+ struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct qed_igu_info {
+ struct qed_igu_map igu_map;
+ u16 igu_dsb_id;
+ u16 igu_base_sb;
+ u16 igu_base_sb_iov;
+ u16 igu_sb_cnt;
+ u16 igu_sb_cnt_iov;
+ u16 free_blks;
+};
+
+/* TODO: names of these functions may change... */
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * @brief qed_int_register_cb - Register callback func for
+ * slowhwfn statusblock.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ * interrupt on the sp sb
+ *
+ * @param cookie - passed to the callback function
+ * @param sb_idx - OUT parameter which gives the chosen index
+ * for this protocol.
+ * @param p_fw_cons - pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * @return int
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons);
+
+/**
+ * @brief qed_int_unregister_cb - Unregisters callback
+ * function from sp sb.
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return int
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+ u8 pi);
+
+/**
+ * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param cleanup_set - set(1) / clear(0)
+ * @param opaque_fid - the function for which to perform
+ * cleanup, for example a PF on behalf of
+ * its VFs.
+ */
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ bool cleanup_set,
+ u16 opaque_fid);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param opaque - opaque fid of the sb owner.
+ * @param cleanup_set - set(1) / clear(0)
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id,
+ u16 opaque,
+ bool b_set);
+
+/**
+ * @brief qed_int_cau_conf_sb - configure cau for a given status
+ * block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+/**
+ * @brief qed_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_free
+ *
+ * @param p_hwfn
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ */
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644
index 000000000000..f72036a2ef5b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -0,0 +1,1704 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+enum qed_rss_caps {
+ QED_RSS_IPV4 = 0x1,
+ QED_RSS_IPV6 = 0x2,
+ QED_RSS_IPV4_TCP = 0x4,
+ QED_RSS_IPV6_TCP = 0x8,
+ QED_RSS_IPV4_UDP = 0x10,
+ QED_RSS_IPV6_UDP = 0x20,
+};
+
+/* Must match ETH_RSS_IND_TABLE_ENTRIES_NUM (checked by BUILD_BUG_ON below) */
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
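+/* i.e. 10 x 32-bit words = a 320-bit (40-byte) hash key, the
+ * conventional Toeplitz RSS key length (editorial note, not from
+ * the HSI definitions).
+ */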
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
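+
+/* Illustrative combination (a sketch, not a driver-defined macro):
+ * a promiscuous Rx configuration would set rx_accept_filter to
+ * QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_UCAST_UNMATCHED |
+ * QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_MCAST_UNMATCHED |
+ * QED_ACCEPT_BCAST, so neither unicast nor multicast is dropped.
+ */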
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_approx_mcast_flg;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+};
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ u32 concrete_fid,
+ u16 opaque_fid,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ struct qed_sp_init_request_params params;
+ struct vport_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+ u16 rx_mode = 0;
+ u8 abs_vport_id = 0;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = cpu_to_le16(mtu);
+ p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
+ p_ramrod->drop_ttl0_en = drop_ttl0_flg;
+
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+ /* TPA related fields */
+ memset(&p_ramrod->tpa_param, 0,
+ sizeof(struct eth_vport_tpa_param));
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+ concrete_fid);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_rss_params *p_params)
+{
+ struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
+ u16 abs_l2_queue = 0, capabilities = 0;
+ int rc = 0, i;
+
+ if (!p_params) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+
+ BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
+ ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+ if (rc)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_params->update_rss_config;
+ rss->update_rss_capabilities = p_params->update_rss_capabilities;
+ rss->update_rss_ind_table = p_params->update_rss_ind_table;
+ rss->update_rss_key = p_params->update_rss_key;
+
+ rss->rss_mode = p_params->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR :
+ ETH_VPORT_RSS_MODE_DISABLED;
+
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
+ rss->tbl_size = p_params->rss_table_size_log;
+
+ rss->capabilities = cpu_to_le16(capabilities);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ rss->rss_mode, rss->update_rss_capabilities,
+ capabilities, rss->update_rss_ind_table,
+ rss->update_rss_key);
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ rc = qed_fw_l2_queue(p_hwfn,
+ (u8)p_params->rss_ind_table[i],
+ &abs_l2_queue);
+ if (rc)
+ return rc;
+
+ rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
+ i, rss->indirection_table[i]);
+ }
+
+ for (i = 0; i < 10; i++)
+ rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+
+ return rc;
+}
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_filter_accept_flags accept_flags)
+{
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->rx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->rx_mode.state = 0x%x\n", state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->tx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->tx_mode.state = 0x%x\n", state);
+ }
+}
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sp_vport_update_params *p_params)
+{
+ int i;
+
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (p_params->update_approx_mcast_flg) {
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+ __le32 val = cpu_to_le32(p_bins[i]);
+
+ p_ramrod->approx_mcast.bins[i] = val;
+ }
+ }
+}
+
+static int
+qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct qed_sp_init_request_params sp_params;
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_params->opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
+
+ p_cmn->vport_id = abs_vport_id;
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+ rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc) {
+ /* Return spq entry which is taken in qed_sp_init_request()*/
+ qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+
+ /* Update mcast bins for VFs, PF doesn't use this functionality */
+ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+ qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 vport_id)
+{
+ struct qed_sp_init_request_params sp_params;
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct qed_spq_entry *p_ent;
+ u8 abs_vport_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+ u8 vport,
+ struct qed_filter_accept_flags accept_flags,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_sp_vport_update_params vport_update_params;
+ int i, rc;
+
+ /* Prepare and send the vport rx_mode change */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+ }
+
+ return 0;
+}
+
+static int qed_sp_release_queue_cid(
+ struct qed_hwfn *p_hwfn,
+ struct qed_hw_cid_data *p_cid_data)
+{
+ if (!p_cid_data->b_cid_allocated)
+ return 0;
+
+ qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
+
+ p_cid_data->b_cid_allocated = false;
+
+ return 0;
+}
+
+static int
+qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
+{
+ struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ /* Store information for the stop */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = params->vport_id;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
+ if (rc != 0)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, cid, params->queue_id, params->vport_id,
+ params->sb);
+
+ memset(&sp_params, 0, sizeof(params));
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ cid, opaque_fid,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(params->sb);
+ p_ramrod->sb_index = params->sb_idx;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
+ p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
+ p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ return rc;
+}
+
+static int
+qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ struct qed_hw_cid_data *p_rx_cid;
+ u64 init_prod_val = 0;
+ u16 abs_l2_queue = 0;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
+ if (rc != 0)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
+ if (rc != 0)
+ return rc;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)(&init_prod_val));
+
+ /* Allocate a CID for the queue */
+ p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_rx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_rx_cid->b_cid_allocated = true;
+
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_rx_cid->cid,
+ params,
+ abs_stats_id,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size);
+
+ if (rc != 0)
+ qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u16 abs_rx_q_id = 0;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_rx_cid->cid,
+ p_rx_cid->opaque_fid,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to come as eqe to PF.
+ */
+ p_ramrod->complete_cqe_flg =
+ (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
+ !eq_completion_only) || cqe_completion;
+ p_ramrod->complete_event_flg =
+ !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
+ eq_completion_only;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+}
+
+static int
+qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
+{
+ struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_hw_cid_data *p_tx_cid;
+ u8 abs_vport_id;
+ int rc = -EINVAL;
+ u16 pq_id;
+
+ /* Store information for the stop */
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ p_tx_cid->cid = cid;
+ p_tx_cid->opaque_fid = opaque_fid;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
+ opaque_fid,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->sb_id = cpu_to_le16(p_params->sb);
+ p_ramrod->sb_index = p_params->sb_idx;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->tc = p_pq_params->eth.tc;
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
+ p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+
+ pq_id = qed_get_qm_pq(p_hwfn,
+ PROTOCOLID_ETH,
+ p_pq_params);
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hw_cid_data *p_tx_cid;
+ union qed_qm_pq_params pq_params;
+ u8 abs_stats_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+ if (rc)
+ return rc;
+
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+ memset(p_tx_cid, 0, sizeof(*p_tx_cid));
+ memset(&pq_params, 0, sizeof(pq_params));
+
+ /* Allocate a CID for the queue */
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_tx_cid->cid);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_tx_cid->b_cid_allocated = true;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, p_tx_cid->cid,
+ p_params->queue_id, p_params->vport_id, p_params->sb);
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_tx_cid->cid,
+ p_params,
+ abs_stats_id,
+ pbl_addr,
+ pbl_size,
+ &pq_params);
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+ if (rc)
+ qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+ return rc;
+}
+
+static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id)
+{
+ struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
+ sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ p_tx_cid->cid,
+ p_tx_cid->opaque_fid,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+}
+
+static enum eth_filter_action
+qed_filter_action(enum qed_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case QED_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case QED_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case QED_FILTER_REPLACE:
+ case QED_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REPLACE;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
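+
+/* For example (illustrative values): mac = aa:bb:cc:dd:ee:ff is
+ * stored byte-swapped per 16-bit word, so read as little-endian
+ * words fw_msb = 0xaabb, fw_mid = 0xccdd and fw_lsb = 0xeeff,
+ * presumably the ordering the firmware filter commands expect.
+ */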
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct qed_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ struct qed_sp_init_request_params sp_params;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ enum eth_filter_action action;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc)
+ return rc;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(**pp_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, pp_ent,
+ qed_spq_get_cid(p_hwfn),
+ opaque_fid,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+ if (rc)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+ switch (p_filter_cmd->opcode) {
+ case QED_FILTER_FLUSH:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+ case QED_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case QED_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+ case QED_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+ case QED_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+ case QED_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+ case QED_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+ case QED_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+ case QED_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case QED_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+ case QED_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+ qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
+ if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else {
+ action = qed_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ return -EINVAL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id = (p_filter_cmd->opcode ==
+ QED_FILTER_REMOVE) ?
+ vport_to_remove_from :
+ vport_to_add_to;
+ }
+
+ return 0;
+}
+
+static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct eth_filter_cmd_header *p_header;
+ int rc;
+
+ rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "Unicast filter ADD command failed %d\n",
+ rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter,
+ p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0],
+ p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2],
+ p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4],
+ p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ * Calculates CRC32 over a buffer
+ * Note: crc32_length MUST be a multiple of 8
+ * Return:
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+ u32 crc32_length,
+ u32 crc32_seed,
+ u8 complement)
+{
+ u32 byte = 0;
+ u32 bit = 0;
+ u8 msb = 0;
+ u8 current_byte = 0;
+ u32 crc32_result = crc32_seed;
+
+ if ((!crc32_packet) ||
+ (crc32_length == 0) ||
+ ((crc32_length % 8) != 0))
+ return crc32_result;
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1; /*crc32_result[0] = 1;*/
+ }
+ }
+ }
+ return crc32_result;
+}
+
+static inline u32 qed_crc32c_le(u32 seed,
+ u8 *mac,
+ u32 len)
+{
+ u32 packet_buf[2] = { 0 };
+
+ memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
+ return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
+static u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ mac, ETH_ALEN);
+
+ return crc & 0xff;
+}
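+
+/* Reading of the code above (not from the hardware spec): the CRC32c
+ * over the zero-padded 8-byte MAC is masked to 8 bits, so each
+ * multicast MAC hashes into one of 256 approximate-match bins, which
+ * qed_sp_eth_filter_mcast() below packs into the 32-bit
+ * approx_mcast.bins[] words of the vport-update ramrod.
+ */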
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_sp_init_request_params sp_params;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0;
+ int rc, i;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ } else {
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+ }
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ sp_params.comp_mode = comp_mode;
+ sp_params.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH,
+ &sp_params);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+ memset(bins, 0, sizeof(bins));
+ /* filter ADD op is explicit set op and it removes
+ * any existing filters for the vport
+ */
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, bins);
+ }
+
+ /* Convert to correct endianness */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)bins;
+ struct vport_update_ramrod_mcast *approx_mcast;
+
+ approx_mcast = &p_ramrod->approx_mcast;
+ approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ /* only ADD and REMOVE operations are supported for multi-cast */
+ if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
+ p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
+ (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+ return -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_mcast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+ return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u16 opaque_fid;
+
+ if (rc != 0)
+ break;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode,
+ p_comp_data);
+ }
+
+ return rc;
+}
+
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+ struct qed_dev_eth_info *info)
+{
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ info->num_tc = 1;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues += FEAT_NUM(&cdev->hwfns[i],
+ QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues = min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+ struct qed_eth_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+ u8 vport_id,
+ u16 mtu,
+ u8 drop_ttl0_flg,
+ u8 inner_vlan_removal_en_flg)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_start(p_hwfn,
+ p_hwfn->hw_info.concrete_fid,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id,
+ mtu,
+ drop_ttl0_flg,
+ inner_vlan_removal_en_flg);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT\n");
+ return rc;
+ }
+
+ qed_hw_start_fastpath(p_hwfn);
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started V-PORT %d with MTU %d\n",
+ vport_id, mtu);
+ }
+
+ qed_reset_vport_stats(cdev);
+
+ return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev,
+ u8 vport_id)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_stop(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ vport_id);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop VPORT\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int qed_update_vport(struct qed_dev *cdev,
+ struct qed_update_vport_params *params)
+{
+ struct qed_sp_vport_update_params sp_params;
+ struct qed_rss_params sp_rss_params;
+ int rc, i;
+
+ if (!cdev)
+ return -ENODEV;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+ /* Translate protocol params into sp params */
+ sp_params.vport_id = params->vport_id;
+ sp_params.update_vport_active_rx_flg =
+ params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg =
+ params->update_vport_active_flg;
+ sp_params.vport_active_rx_flg = params->vport_active_flg;
+ sp_params.vport_active_tx_flg = params->vport_active_flg;
+
+ /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
+ * We need to re-fix the rss values per engine for CMT.
+ */
+ if (cdev->num_hwfns > 1 && params->update_rss_flg) {
+ struct qed_update_vport_rss_params *rss =
+ &params->rss_params;
+ int k, max = 0;
+
+ /* Find largest entry, since it's possible RSS needs to
+ * be disabled [in case only 1 queue per-hwfn]
+ */
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ max = (max > rss->rss_ind_table[k]) ?
+ max : rss->rss_ind_table[k];
+
+ /* Either fix RSS values or disable RSS */
+ if (cdev->num_hwfns < max + 1) {
+ int divisor = (max + cdev->num_hwfns - 1) /
+ cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - fixing RSS values (modulo %02x)\n",
+ divisor);
+
+ for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+ rss->rss_ind_table[k] =
+ rss->rss_ind_table[k] % divisor;
+ } else {
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ params->update_rss_flg = 0;
+ }
+ }
+
+ /* Now, update the RSS configuration for actual configuration */
+ if (params->update_rss_flg) {
+ sp_rss_params.update_rss_config = 1;
+ sp_rss_params.rss_enable = 1;
+ sp_rss_params.update_rss_capabilities = 1;
+ sp_rss_params.update_rss_ind_table = 1;
+ sp_rss_params.update_rss_key = 1;
+ sp_rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+ memcpy(sp_rss_params.rss_ind_table,
+ params->rss_params.rss_ind_table,
+ QED_RSS_IND_TABLE_SIZE * sizeof(u16));
+ memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+ QED_RSS_KEY_SIZE * sizeof(u32));
+ }
+ sp_params.rss_params = &sp_rss_params;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = qed_sp_vport_update(p_hwfn, &sp_params,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_ERR(cdev, "Failed to update VPORT\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Updated V-PORT %d: active_flag %d [update %d]\n",
+ params->vport_id, params->vport_active_flg,
+ params->update_vport_active_flg);
+ }
+
+ return 0;
+}
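+
+/* Worked example for the CMT RSS fixup above (illustrative only): with
+ * num_hwfns = 2 and an indirection table whose largest entry is 7, the
+ * divisor is (7 + 2 - 1) / 2 = 4, so a device-wide table
+ * [0 1 2 3 4 5 6 7 ...] folds into [0 1 2 3 0 1 2 3 ...] and each hwfn
+ * spreads traffic over its own local queues 0..3. With a single queue per
+ * hwfn the largest entry is at most 1 and RSS is simply disabled instead.
+ */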
+
+static int qed_start_rxq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void __iomem **pp_prod)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ params->queue_id /= cdev->num_hwfns;
+
+ rc = qed_sp_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ pp_prod);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ params->queue_id, params->rss_id, params->vport_id,
+ params->sb);
+
+ return 0;
+}
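+
+/* Illustrative queue spread for 100G (CMT) devices, assuming two hwfns and
+ * rss_id equal to the global queue id: queues interleave across the
+ * engines, so global queue N is started as local queue N / 2 on hwfn N % 2:
+ *
+ *	global: 0 1 2 3 4 5 6 7
+ *	hwfn:   0 1 0 1 0 1 0 1
+ *	local:  0 0 1 1 2 2 3 3
+ */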
+
+static int qed_stop_rxq(struct qed_dev *cdev,
+ struct qed_stop_rxq_params *params)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ params->rx_queue_id / cdev->num_hwfns,
+ params->eq_completion_only,
+ false);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void __iomem **pp_doorbell)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = p_params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ /* Fix queue ID in 100g mode */
+ p_params->queue_id /= cdev->num_hwfns;
+
+ rc = qed_sp_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ pbl_addr,
+ pbl_size,
+ pp_doorbell);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, p_params->rss_id, p_params->vport_id,
+ p_params->sb);
+
+ return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+ qed_hw_stop_fastpath(cdev);
+
+ return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev,
+ struct qed_stop_txq_params *params)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = params->rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ params->tx_queue_id / cdev->num_hwfns);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qed_filter_accept_flags accept_flags;
+
+ memset(&accept_flags, 0, sizeof(accept_flags));
+
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+ accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
+ else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+ accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+
+ return qed_filter_accept_cmd(cdev, 0, accept_flags,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params)
+{
+ struct qed_filter_ucast ucast;
+
+ if (!params->vlan_valid && !params->mac_valid) {
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
+ return -EINVAL;
+ }
+
+ memset(&ucast, 0, sizeof(ucast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ ucast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ ucast.opcode = QED_FILTER_REMOVE;
+ break;
+ case QED_FILTER_XCAST_TYPE_REPLACE:
+ ucast.opcode = QED_FILTER_REPLACE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+ params->type);
+ }
+
+ if (params->vlan_valid && params->mac_valid) {
+ ucast.type = QED_FILTER_MAC_VLAN;
+ ether_addr_copy(ucast.mac, params->mac);
+ ucast.vlan = params->vlan;
+ } else if (params->mac_valid) {
+ ucast.type = QED_FILTER_MAC;
+ ether_addr_copy(ucast.mac, params->mac);
+ } else {
+ ucast.type = QED_FILTER_VLAN;
+ ucast.vlan = params->vlan;
+ }
+
+ ucast.is_rx_filter = true;
+ ucast.is_tx_filter = true;
+
+ return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params)
+{
+ struct qed_filter_mcast mcast;
+ int i;
+
+ memset(&mcast, 0, sizeof(mcast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ mcast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ mcast.opcode = QED_FILTER_REMOVE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+ params->type);
+ }
+
+ mcast.num_mc_addrs = params->num;
+ for (i = 0; i < mcast.num_mc_addrs; i++)
+ ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+ return qed_filter_mcast_cmd(cdev, &mcast,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter(struct qed_dev *cdev,
+ struct qed_filter_params *params)
+{
+ enum qed_filter_rx_mode_type accept_flags;
+
+ switch (params->type) {
+ case QED_FILTER_TYPE_UCAST:
+ return qed_configure_filter_ucast(cdev, &params->filter.ucast);
+ case QED_FILTER_TYPE_MCAST:
+ return qed_configure_filter_mcast(cdev, &params->filter.mcast);
+ case QED_FILTER_TYPE_RX_MODE:
+ accept_flags = params->filter.accept_flags;
+ return qed_configure_filter_rx_mode(cdev, accept_flags);
+ default:
+ DP_NOTICE(cdev, "Unknown filter type %d\n",
+ (int)params->type);
+ return -EINVAL;
+ }
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+ u8 rss_id,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+ cqe);
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_eth_dev_info,
+ .register_ops = &qed_register_eth_ops,
+ .vport_start = &qed_start_vport,
+ .vport_stop = &qed_stop_vport,
+ .vport_update = &qed_update_vport,
+ .q_rx_start = &qed_start_rxq,
+ .q_rx_stop = &qed_stop_rxq,
+ .q_tx_start = &qed_start_txq,
+ .q_tx_stop = &qed_stop_txq,
+ .filter_config = &qed_configure_filter,
+ .fastpath_stop = &qed_fastpath_stop,
+ .eth_cqe_completion = &qed_fp_cqe_completion,
+ .get_vport_stats = &qed_get_vport_stats,
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+{
+ if (version != QED_ETH_INTERFACE_VERSION) {
+ pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
+ version, QED_ETH_INTERFACE_VERSION);
+ return NULL;
+ }
+
+ return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+ /* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644
index 000000000000..947c7af72b25
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -0,0 +1,1169 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/qed/qed_if.h>
+
+#include "qed.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_hw.h"
+
+static const char version[] =
+ "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define FW_FILE_VERSION \
+ __stringify(FW_MAJOR_VERSION) "." \
+ __stringify(FW_MINOR_VERSION) "." \
+ __stringify(FW_REVISION_VERSION) "." \
+ __stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME \
+ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+static int __init qed_init(void)
+{
+ pr_notice("qed_init called\n");
+
+ pr_info("%s", version);
+
+ return 0;
+}
+
+static void __exit qed_cleanup(void)
+{
+ pr_notice("qed_cleanup called\n");
+}
+
+module_init(qed_init);
+module_exit(qed_cleanup);
+
+/* Check if the DMA controller on the machine can properly handle the DMA
+ * addressing required by the device.
+ */
+static int qed_set_coherency_mask(struct qed_dev *cdev)
+{
+ struct device *dev = &cdev->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ DP_NOTICE(cdev,
+ "Can't request 64-bit consistent allocations\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
+ return -EIO;
+ }
+
+ return 0;
+}
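+
+/* The same fallback can be written with the combined helper; a sketch,
+ * assuming no separate 64-bit streaming / 32-bit coherent split is wanted
+ * (the code above instead fails outright when 64-bit coherent allocations
+ * are unavailable, with a distinct message):
+ *
+ *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
+ *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
+ *		return -EIO;
+ */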
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+ struct pci_dev *pdev = cdev->pdev;
+
+ if (cdev->doorbells)
+ iounmap(cdev->doorbells);
+ if (cdev->regview)
+ iounmap(cdev->regview);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+/* Performs PCI initializations as well as initializing PCI-related parameters
+ * in the device structure. Returns 0 in case of success.
+ */
+static int qed_init_pci(struct qed_dev *cdev,
+ struct pci_dev *pdev)
+{
+ int rc;
+
+ cdev->pdev = pdev;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Cannot enable PCI device\n");
+ goto err0;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #0\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #2\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, "qed");
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to request PCI memory resources\n");
+ goto err1;
+ }
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ if (!pci_is_pcie(pdev)) {
+ DP_NOTICE(cdev, "The bus is not PCI Express\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (cdev->pci_params.pm_cap == 0)
+ DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+ rc = qed_set_coherency_mask(cdev);
+ if (rc)
+ goto err2;
+
+ cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+ cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+ cdev->pci_params.irq = pdev->irq;
+
+ cdev->regview = pci_ioremap_bar(pdev, 0);
+ if (!cdev->regview) {
+ DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ iounmap(cdev->regview);
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ pci_release_regions(pdev);
+err1:
+ pci_disable_device(pdev);
+err0:
+ return rc;
+}
+
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info)
+{
+ struct qed_ptt *ptt;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ dev_info->num_hwfns = cdev->num_hwfns;
+ dev_info->pci_mem_start = cdev->pci_params.mem_start;
+ dev_info->pci_mem_end = cdev->pci_params.mem_end;
+ dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+ ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+
+ qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+
+ return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+ kfree(cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+ struct qed_dev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return cdev;
+
+ qed_init_struct(cdev);
+
+ return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev,
+ pci_power_t state)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+ return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+ enum qed_protocol protocol,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct qed_dev *cdev;
+ int rc;
+
+ cdev = qed_alloc_cdev(pdev);
+ if (!cdev)
+ goto err0;
+
+ cdev->protocol = protocol;
+
+ qed_init_dp(cdev, dp_module, dp_level);
+
+ rc = qed_init_pci(cdev, pdev);
+ if (rc) {
+ DP_ERR(cdev, "init pci failed\n");
+ goto err1;
+ }
+ DP_INFO(cdev, "PCI init completed successfully\n");
+
+ rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+ if (rc) {
+ DP_ERR(cdev, "hw prepare failed\n");
+ goto err2;
+ }
+
+ DP_INFO(cdev, "qed_probe completed successffuly\n");
+
+ return cdev;
+
+err2:
+ qed_free_pci(cdev);
+err1:
+ qed_free_cdev(cdev);
+err0:
+ return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return;
+
+ qed_hw_remove(cdev);
+
+ qed_free_pci(cdev);
+
+ qed_set_power_state(cdev, PCI_D3hot);
+
+ qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ pci_disable_msix(cdev->pdev);
+ kfree(cdev->int_params.msix_table);
+ } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+ pci_disable_msi(cdev->pdev);
+ }
+
+ memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+ struct qed_int_params *int_params)
+{
+ int i, rc, cnt;
+
+ cnt = int_params->in.num_vectors;
+
+ for (i = 0; i < cnt; i++)
+ int_params->msix_table[i].entry = i;
+
+ rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+ int_params->in.min_msix_cnt, cnt);
+ if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+ (rc % cdev->num_hwfns)) {
+ pci_disable_msix(cdev->pdev);
+
+ /* If fastpath is initialized, we need at least one interrupt
+ * per hwfn [and the slow path interrupts]. New requested number
+ * should be a multiple of the number of hwfns.
+ */
+ cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+ DP_NOTICE(cdev,
+ "Trying to enable MSI-X with less vectors (%d out of %d)\n",
+ cnt, int_params->in.num_vectors);
+ rc = pci_enable_msix_exact(cdev->pdev,
+ int_params->msix_table, cnt);
+ if (!rc)
+ rc = cnt;
+ }
+
+ if (rc > 0) {
+ /* MSI-x configuration was achieved */
+ int_params->out.int_mode = QED_INT_MODE_MSIX;
+ int_params->out.num_vectors = rc;
+ rc = 0;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+ cnt, rc);
+ }
+
+ return rc;
+}
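+
+/* Worked example for the retry path above (illustrative only): a two-hwfn
+ * device asking for 18 vectors with min_msix_cnt = 4 may get a partial
+ * grant of 11; 11 is not a multiple of num_hwfns, so it is rounded down to
+ * (11 / 2) * 2 = 10 and re-requested exactly, leaving each engine one
+ * slowpath plus four fastpath vectors.
+ */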
+
+/* This function outputs the int mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+ struct qed_int_params *int_params = &cdev->int_params;
+ struct msix_entry *tbl;
+ int rc = 0, cnt;
+
+ switch (int_params->in.int_mode) {
+ case QED_INT_MODE_MSIX:
+ /* Allocate MSIX table */
+ cnt = int_params->in.num_vectors;
+ int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+ if (!int_params->msix_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Enable MSIX */
+ rc = qed_enable_msix(cdev, int_params);
+ if (!rc)
+ goto out;
+
+ DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+ kfree(int_params->msix_table);
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_MSI:
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
+
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ /* Fallthrough */
+
+ case QED_INT_MODE_INTA:
+ int_params->out.int_mode = QED_INT_MODE_INTA;
+ rc = 0;
+ goto out;
+ default:
+ DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+ int_params->in.int_mode);
+ rc = -EINVAL;
+ }
+
+out:
+ cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+ return rc;
+}
+
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+ int index, void(*handler)(void *))
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ hwfn->simd_proto_handler[relative_idx].func = handler;
+ hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ memset(&hwfn->simd_proto_handler[relative_idx], 0,
+ sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+ struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+ struct qed_hwfn *hwfn;
+ irqreturn_t rc = IRQ_NONE;
+ u64 status;
+ int i, j;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+ if (!status)
+ continue;
+
+ hwfn = &cdev->hwfns[i];
+
+ /* Slowpath interrupt */
+ if (unlikely(status & 0x1)) {
+ tasklet_schedule(hwfn->sp_dpc);
+ status &= ~0x1;
+ rc = IRQ_HANDLED;
+ }
+
+ /* Fastpath interrupts */
+ for (j = 0; j < 64; j++) {
+ if ((0x2ULL << j) & status) {
+ hwfn->simd_proto_handler[j].func(
+ hwfn->simd_proto_handler[j].token);
+ status &= ~(0x2ULL << j);
+ rc = IRQ_HANDLED;
+ }
+ }
+
+ if (unlikely(status))
+ DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+ "got an unknown interrupt status 0x%llx\n",
+ status);
+ }
+
+ return rc;
+}
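+
+/* Decode used above, per hwfn: bit 0 of the IGU single-ISR status is the
+ * slowpath indication, while bit (j + 1) signals fastpath SIMD handler j,
+ * hence the (0x2ULL << j) test. E.g. status = 0x5 schedules the slowpath
+ * tasklet and invokes simd_proto_handler[1].
+ */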
+
+static int qed_slowpath_irq_req(struct qed_dev *cdev)
+{
+ int i = 0, rc = 0;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ /* Request all the slowpath MSI-X vectors */
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ snprintf(cdev->hwfns[i].name, NAME_SIZE,
+ "sp-%d-%02x:%02x.%02x",
+ i, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ cdev->hwfns[i].abs_pf_id);
+
+ rc = request_irq(cdev->int_params.msix_table[i].vector,
+ qed_msix_sp_int, 0,
+ cdev->hwfns[i].name,
+ cdev->hwfns[i].sp_dpc);
+ if (rc)
+ break;
+
+ DP_VERBOSE(&cdev->hwfns[i],
+ (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath MSI-X\n");
+ }
+
+ if (i != cdev->num_hwfns) {
+ /* Free the already requested MSI-X vectors */
+ for (i--; i >= 0; i--) {
+ unsigned int vec =
+ cdev->int_params.msix_table[i].vector;
+ synchronize_irq(vec);
+ free_irq(cdev->int_params.msix_table[i].vector,
+ cdev->hwfns[i].sp_dpc);
+ }
+ }
+ } else {
+ unsigned long flags = 0;
+
+ snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+ cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+ PCI_FUNC(cdev->pdev->devfn));
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+ flags |= IRQF_SHARED;
+
+ rc = request_irq(cdev->pdev->irq, qed_single_int,
+ flags, cdev->name, cdev);
+ }
+
+ return rc;
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+ int i;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i) {
+ synchronize_irq(cdev->int_params.msix_table[i].vector);
+ free_irq(cdev->int_params.msix_table[i].vector,
+ cdev->hwfns[i].sp_dpc);
+ }
+ } else {
+ free_irq(cdev->pdev->irq, cdev);
+ }
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+ int i, rc;
+
+ rc = qed_hw_stop(cdev);
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(p_hwfn->sp_dpc);
+ p_hwfn->b_sp_dpc_enabled = false;
+ DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+ "Disabled sp taskelt [hwfn %d] at %p\n",
+ i, p_hwfn->sp_dpc);
+ }
+ }
+
+ return rc;
+}
+
+static int qed_nic_reset(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_hw_reset(cdev);
+ if (rc)
+ return rc;
+
+ qed_resc_free(cdev);
+
+ return 0;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_resc_alloc(cdev);
+ if (rc)
+ return rc;
+
+ DP_INFO(cdev, "Allocated qed resources\n");
+
+ qed_resc_setup(cdev);
+
+ return rc;
+}
+
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+ limit = cdev->num_hwfns * 63;
+ else if (cdev->int_params.fp_msix_cnt)
+ limit = cdev->int_params.fp_msix_cnt;
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(struct qed_int_info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ /* Need to expose only MSI-X information; Single IRQ is handled solely
+ * by qed.
+ */
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.fp_msix_base;
+
+ info->msix_cnt = cdev->int_params.fp_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+ }
+
+ return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+ enum qed_int_mode int_mode)
+{
+ int rc, i;
+ u8 num_vectors = 0;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+
+ cdev->int_params.in.int_mode = int_mode;
+ for_each_hwfn(cdev, i)
+ num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
+ cdev->int_params.in.num_vectors = num_vectors;
+
+ /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
+ rc = qed_set_int_mode(cdev, false);
+ if (rc) {
+ DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ return rc;
+ }
+
+ cdev->int_params.fp_msix_base = cdev->num_hwfns;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+ cdev->num_hwfns;
+
+ return 0;
+}
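+
+/* Vector budgeting example (illustrative): a CMT device with two hwfns and
+ * eight status blocks per hwfn requests (8 + 1) * 2 = 18 vectors and
+ * insists on at least 2 * 2 = 4. The first num_hwfns granted vectors are
+ * the slowpath ones, so fp_msix_base = 2 and everything above it is handed
+ * to the protocol driver via qed_get_int_fp().
+ */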
+
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+ rc);
+ return 0;
+ }
+
+ rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+ zlib_inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+ p_hwfn->stream->msg, rc);
+ return 0;
+ }
+
+ return p_hwfn->stream->total_out / 4;
+}
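+
+/* A caller sketch (illustrative; the buffer names are assumptions): the
+ * helper returns the unzipped length in dwords, or 0 on any zlib error, so
+ * the result can feed the dword-based init engine directly:
+ *
+ *	u32 dwords = qed_unzip_data(p_hwfn, fw_len, fw_buf,
+ *				    max_size, unzip_buf);
+ *	if (!dwords)
+ *		return -EINVAL;
+ */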
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+ void *workspace;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+
+ workspace = vzalloc(zlib_inflate_workspacesize());
+ if (!workspace)
+ return -ENOMEM;
+ p_hwfn->stream->workspace = workspace;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ vfree(p_hwfn->stream->workspace);
+ kfree(p_hwfn->stream);
+ }
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+ struct qed_pf_params *params)
+{
+ int i;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->pf_params = *params;
+ }
+}
+
+static int qed_slowpath_start(struct qed_dev *cdev,
+ struct qed_slowpath_params *params)
+{
+ struct qed_mcp_drv_version drv_version;
+ const u8 *data = NULL;
+ struct qed_hwfn *hwfn;
+ int rc;
+
+ rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
+
+ rc = qed_nic_setup(cdev);
+ if (rc)
+ goto err;
+
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (rc)
+ goto err1;
+
+ /* Request the slowpath IRQ */
+ rc = qed_slowpath_irq_req(cdev);
+ if (rc)
+ goto err2;
+
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err3;
+ }
+
+ /* Start the slowpath */
+ data = cdev->firmware->data;
+
+ rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ true, data);
+ if (rc)
+ goto err3;
+
+ DP_INFO(cdev,
+ "HW initialization and function start completed successfully\n");
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
+
+ return 0;
+
+err3:
+ qed_free_stream_mem(cdev);
+ qed_slowpath_irq_free(cdev);
+err2:
+ qed_disable_msix(cdev);
+err1:
+ qed_resc_free(cdev);
+err:
+ release_firmware(cdev->firmware);
+
+ return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ qed_free_stream_mem(cdev);
+
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+
+ qed_disable_msix(cdev);
+ qed_nic_reset(cdev);
+
+ release_firmware(cdev->firmware);
+
+ return 0;
+}
+
+static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
+ char ver_str[VER_SIZE])
+{
+ int i;
+
+ memcpy(cdev->name, name, NAME_SIZE);
+ for_each_hwfn(cdev, i)
+ snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+
+ memcpy(cdev->ver_str, ver_str, VER_SIZE);
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id,
+ enum qed_sb_type type)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u8 n_hwfns;
+ u32 rc;
+
+ /* RoCE uses a single engine and CMT uses two engines. When using both
+ * we force only a single engine. Storage uses only engine 0 too.
+ */
+ if (type == QED_SB_TYPE_L2_QUEUE)
+ n_hwfns = cdev->num_hwfns;
+ else
+ n_hwfns = 1;
+
+ hwfn_index = sb_id % n_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / n_hwfns;
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+ sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+ return rc;
+}
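+
+/* Mapping example for the status-block spread above (illustrative): on a
+ * CMT device with two engines, L2 SB 5 is initialized as relative
+ * SB 5 / 2 = 2 of hwfn 5 % 2 = 1, while a storage SB keeps its id
+ * unchanged and always lands on hwfn 0 since n_hwfns is forced to 1.
+ */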
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct qed_hwfn *p_hwfn;
+ int hwfn_index;
+ u16 rel_sb_id;
+ u32 rc;
+
+ hwfn_index = sb_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+ return rc;
+}
+
+static int qed_set_link(struct qed_dev *cdev,
+ struct qed_link_params *params)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_mcp_link_params *link_params;
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (!cdev)
+ return -ENODEV;
+
+ /* The link should be set only once per PF */
+ hwfn = &cdev->hwfns[0];
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = qed_mcp_get_link_params(hwfn);
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ link_params->speed.autoneg = params->autoneg;
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+ link_params->speed.advertised_speeds = 0;
+ if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
+ (params->adv_speeds & SUPPORTED_1000baseT_Full))
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (params->adv_speeds & 0)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+ }
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+ link_params->speed.forced_speed = params->forced_speed;
+
+ rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+ int port_type;
+
+ switch (media_type) {
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_KR:
+ port_type = PORT_FIBRE;
+ break;
+ case MEDIA_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case MEDIA_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case MEDIA_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case MEDIA_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+ struct qed_link_output *if_link)
+{
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_mcp_link_capabilities link_caps;
+ u32 media_type;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ /* TODO - at the moment assume supported and advertised speed equal */
+ if_link->supported_caps = SUPPORTED_FIBRE;
+ if (params.speed.autoneg)
+ if_link->supported_caps |= SUPPORTED_Autoneg;
+ if (params.pause.autoneg ||
+ (params.pause.forced_rx && params.pause.forced_tx))
+ if_link->supported_caps |= SUPPORTED_Asym_Pause;
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ if_link->supported_caps |= SUPPORTED_Pause;
+
+ if_link->advertised_caps = if_link->supported_caps;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->advertised_caps |= 0;
+ if (params.speed.advertised_speeds &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->advertised_caps |= 0;
+
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ if_link->supported_caps |= SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ if_link->supported_caps |= 0;
+ if (link_caps.speed_capabilities &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ if_link->supported_caps |= 0;
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ /* TODO - fill duplex properly */
+ if_link->duplex = DUPLEX_FULL;
+ qed_mcp_get_media_type(hwfn->cdev, &media_type);
+ if_link->port = qed_get_port_type(media_type);
+
+ if_link->autoneg = params.speed.autoneg;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ /* Link partner capabilities */
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_HD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Half;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_FD)
+ if_link->lp_caps |= SUPPORTED_1000baseT_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_10G)
+ if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_40G)
+ if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_50G)
+ if_link->lp_caps |= 0;
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_100G)
+ if_link->lp_caps |= 0;
+
+ if (link.an_complete)
+ if_link->lp_caps |= SUPPORTED_Autoneg;
+
+ if (link.partner_adv_pause)
+ if_link->lp_caps |= SUPPORTED_Pause;
+ if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+ link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+ if_link->lp_caps |= SUPPORTED_Asym_Pause;
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+ struct qed_link_output *if_link)
+{
+ qed_fill_link(&cdev->hwfns[0], if_link);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+ struct qed_link_output if_link;
+
+ qed_fill_link(hwfn, &if_link);
+
+ if (IS_LEAD_HWFN(hwfn) && cookie)
+ op->link_update(cookie, &if_link);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int i, rc;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_drain(hwfn, ptt);
+ if (rc)
+ return rc;
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ return 0;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+ .probe = &qed_probe,
+ .remove = &qed_remove,
+ .set_power_state = &qed_set_power_state,
+ .set_id = &qed_set_id,
+ .update_pf_params = &qed_update_pf_params,
+ .slowpath_start = &qed_slowpath_start,
+ .slowpath_stop = &qed_slowpath_stop,
+ .set_fp_int = &qed_set_int_fp,
+ .get_fp_int = &qed_get_int_fp,
+ .sb_init = &qed_sb_init,
+ .sb_release = &qed_sb_release,
+ .simd_handler_config = &qed_simd_handler_config,
+ .simd_handler_clean = &qed_simd_handler_clean,
+ .set_link = &qed_set_link,
+ .get_link = &qed_get_current_link,
+ .drain = &qed_drain,
+ .update_msglvl = &qed_init_dp,
+ .chain_alloc = &qed_chain_alloc,
+ .chain_free = &qed_chain_free,
+};
+
+u32 qed_get_protocol_version(enum qed_protocol protocol)
+{
+ switch (protocol) {
+ case QED_PROTOCOL_ETH:
+ return QED_ETH_INTERFACE_VERSION;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644
index 000000000000..20d048cdcb88
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define CHIP_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ u32 tmp, i;
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
+ for (i = 0; i < length; i++) {
+ tmp = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ /* The MB data is actually BE; Need to force it to cpu */
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ be32_to_cpu((__force __be32)tmp);
+ }
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ kfree(p_hwfn->mcp_info->mfw_mb_cur);
+ kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+ }
+ kfree(p_hwfn->mcp_info);
+
+ return 0;
+}
+
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+ p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base)
+ return 0;
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Set the MFW MB address */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ return 0;
+}
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+ if (!p_hwfn->mcp_info)
+ goto err;
+ p_info = p_hwfn->mcp_info;
+
+ if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+ DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since public_base indicate that
+ * the MCP is not initialized
+ */
+ return 0;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+ p_info->mfw_mb_shadow = kzalloc(size, GFP_ATOMIC);
+ if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow ||
+ !p_info->mfw_mb_addr)
+ goto err;
+
+ /* Initialize the MFW mutex */
+ mutex_init(&p_info->mutex);
+
+ return 0;
+
+err:
+ DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
+ qed_mcp_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 org_mcp_reset_seq, cnt = 0;
+ int rc = 0;
+
+ /* Set drv command along with the updated sequence */
+ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
+ (DRV_MSG_CODE_MCP_RESET | seq));
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ /* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
+ } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < QED_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = -EAGAIN;
+ }
+
+ return rc;
+}
+
+static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ u8 delay = CHIP_MCP_RESP_ITER_US;
+ u32 seq, cnt = 1, actual_mb_seq;
+ int rc = 0;
+
+ /* Get actual driver mailbox sequence */
+ actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Use MCP history register to check if MCP reset occurred between
+ * init time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+ /* Set drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+ /* Set drv command along with the updated sequence */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "wrote command (%x) to MFW MB param 0x%08x\n",
+ (cmd | seq), param);
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+ /* Give the FW up to 5 seconds (500 * 1000 * 10 usec) */
+ } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+ (cnt++ < QED_DRV_MB_MAX_RETRIES));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt * delay, *o_mcp_resp, seq);
+
+ /* Is this a reply to our command? */
+ if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+ *o_mcp_resp &= FW_MSG_CODE_MASK;
+ /* Get the MCP param */
+ *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+ } else {
+ /* FW BUG! */
+ DP_ERR(p_hwfn, "MFW failed to respond!\n");
+ *o_mcp_resp = 0;
+ rc = -EAGAIN;
+ }
+ return rc;
+}
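+
+/* The mailbox handshake above in short (illustrative summary): the driver
+ * bumps its sequence number, writes the parameter and then (cmd | seq)
+ * into the driver mailbox, and polls fw_mb_header until the MFW echoes the
+ * same sequence back. With a 10 usec delay per iteration and
+ * QED_DRV_MB_MAX_RETRIES = 500 * 1000 iterations, the firmware gets up to
+ * ~5 seconds before the command is declared lost.
+ */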
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ int rc = 0;
+
+ /* MCP not initialized */
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Lock Mutex to ensure only single thread is
+ * accessing the MCP at one time
+ */
+ mutex_lock(&p_hwfn->mcp_info->mutex);
+ rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param);
+ /* Release Mutex */
+ mutex_unlock(&p_hwfn->mcp_info->mutex);
+
+ return rc;
+}
+
+static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 i;
+
+ /* Copy version string to MCP */
+ for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
+ *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 param;
+ int rc;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
+ return -EBUSY;
+ }
+
+ /* Save driver's version to shmem */
+ qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Load Request */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+ (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ cdev->drv_type),
+ p_load_code, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* If the MFW refused the load request we must abort. This can
+ * happen in the following cases:
+ * - The other port is in diagnostic mode.
+ * - A previously loaded function on the engine is not compliant
+ * with the requester.
+ * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+ */
+ if (!(*p_load_code) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+ DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_reset)
+{
+ struct qed_mcp_link_state *p_link;
+ u32 status = 0;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ memset(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+ "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+ status,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link indications\n");
+ return;
+ }
+
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ /* Fall-through */
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ }
+
+ /* Correct speed according to bandwidth allocation */
+ if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+ p_link->speed = p_link->speed *
+ p_hwfn->mcp_info->func_info.bandwidth_max /
+ 100;
+ qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_link->speed);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+ }
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ qed_link_update(p_hwfn);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up)
+{
+ struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ u32 param = 0, reply = 0, cmd;
+ struct pmm_phy_cfg phy_cfg;
+ int rc = 0;
+ u32 i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ /* Set the shmem configuration according to params */
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+
+ /* Write the requested configuration to shmem */
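+ /* shmem is dword-accessed, so phy_cfg is copied one u32 at a time;
+ * its size is assumed to be dword-aligned here.
+ */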
+ for (i = 0; i < sizeof(phy_cfg); i += 4)
+ qed_wr(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data) + i,
+ ((u32 *)&phy_cfg)[i >> 2]);
+
+ if (b_up) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
+ phy_cfg.speed,
+ phy_cfg.pause,
+ phy_cfg.adv_speed,
+ phy_cfg.loopback_mode,
+ phy_cfg.feature_config_flags);
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link\n");
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+ p_hwfn->mcp_info->drv_mb_seq,
+ p_hwfn->mcp_info->drv_pulse_seq);
+
+ /* Send the PHY configuration / link-reset command to the MFW */
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Reset the link status if needed */
+ if (!b_up)
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+ return 0;
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *info = p_hwfn->mcp_info;
+ int rc = 0;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ qed_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+ rc = -EINVAL;
+ }
+ }
+
+ /* ACK everything */
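+ /* The ack slots sit right after the length word and the message
+ * dwords themselves, hence the address arithmetic below.
+ */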
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+ /* The MFW expects the answer in BE, so we force the write in that format */
+ qed_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32),
+ (__force u32)val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Received an MFW message indication but no new message!\n");
+ rc = -EINVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *p_mfw_ver)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+ u32 global_offsize;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ global_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+ public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global, mfw_ver));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *p_media_type)
+{
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+ struct qed_ptt *p_ptt;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ *p_media_type = MEDIA_UNSPECIFIED;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, media_type));
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
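+ /* Copy no more than both the local struct and the shmem section
+ * can hold, in case their sizes differ.
+ */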
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+ struct public_func *p_info,
+ enum qed_pci_personality *p_proto)
+{
+ int rc = 0;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ *p_proto = QED_PCI_ETH;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+ &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return -EINVAL;
+ }
+
+ if (p_hwfn->cdev->mf_mode != SF) {
+ info->bandwidth_min = (shmem_info.config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ info->bandwidth_min);
+ info->bandwidth_min = 1;
+ }
+
+ info->bandwidth_max = (shmem_info.config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ info->bandwidth_max);
+ info->bandwidth_max = 100;
+ }
+ }
+
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+ } else {
+ DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+ }
+
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+ (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+ (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac[0], info->mac[1], info->mac[2],
+ info->mac[3], info->mac[4], info->mac[5],
+ info->wwn_port, info->wwn_node, info->ovlan);
+
+ return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 100,
+ &resp, &param);
+
+ /* Wait for the drain to complete before returning */
+ msleep(120);
+
+ return rc;
+}
+
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size)
+{
+ u32 flash_size;
+
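+ /* NVM_CFG4 encodes the flash size as a power of two in Mbits; e.g.
+ * an encoded value of 3 yields (1 << (3 + MCP_BYTES_PER_MBIT_SHIFT))
+ * bytes.
+ */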
+ flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+ *p_flash_size = flash_size;
+
+ return 0;
+}
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver)
+{
+ int rc = 0;
+ u32 param = 0, reply = 0, i;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
+ p_ver->version);
+ /* Copy version string to shmem */
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
+ DRV_MB_WR(p_hwfn, p_ptt,
+ union_data.drv_version.name[i * sizeof(u32)],
+ *(u32 *)&p_ver->name[i * sizeof(u32)]);
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
+ &param);
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644
index 000000000000..dbaae586b4a7
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -0,0 +1,369 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "qed_hsi.h"
+
+struct qed_mcp_link_speed_params {
+ bool autoneg;
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
+};
+
+struct qed_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+struct qed_mcp_link_params {
+ struct qed_mcp_link_speed_params speed;
+ struct qed_mcp_link_pause_params pause;
+ u32 loopback_mode;
+};
+
+struct qed_mcp_link_capabilities {
+ u32 speed_capabilities;
+};
+
+struct qed_mcp_link_state {
+ bool link_up;
+
+ u32 speed; /* In Mb/s */
+ bool full_duplex;
+
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G BIT(3)
+#define QED_LINK_PARTNER_SPEED_40G BIT(4)
+#define QED_LINK_PARTNER_SPEED_50G BIT(5)
+#define QED_LINK_PARTNER_SPEED_100G BIT(6)
+ u32 partner_adv_speed;
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define QED_LINK_PARTNER_BOTH_PAUSE (3)
+ u8 partner_adv_pause;
+
+ bool sfp_tx_fault;
+};
+
+struct qed_mcp_function_info {
+ u8 pause_on_host;
+
+ enum qed_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define QED_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+};
+
+struct qed_mcp_nvm_common {
+ u32 offset;
+ u32 param;
+ u32 resp;
+ u32 cmd;
+};
+
+struct qed_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct qed_mcp_link_capabilities
+ *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return int
+ */
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param cdev - qed dev pointer
+ * @param mfw_ver - mfw version value
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+ u32 *mfw_ver);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param cdev - qed dev pointer
+ * @param media_type - media type value
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+ u32 *media_type);
+
+/**
+ * @brief General function for sending commands to the MCP
+ * mailbox. It acquires a mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response arrives. The MCP response is polled every
+ * 5ms, for up to 5 seconds.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP.
+ * @param param - Optional param
+ * @param o_mcp_resp - The MCP response code (excluding sequence).
+ * @param o_mcp_param - Optional parameter provided by the MCP
+ * response
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
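+
+/* A minimal usage sketch, mirroring qed_mcp_drain() below:
+ *
+ * u32 resp = 0, param = 0;
+ * int rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
+ * &resp, &param);
+ */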
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver);
+
+/* Using hwfn number (and not pf_num) is required since in CMT mode,
+ * same pf_num may be used by two different hwfn
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
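+/* e.g. on BB, a hwfn with an odd abs_pf_id maps rel_pfid N to MFW
+ * pf-id N + 8; otherwise the relative id is used as-is.
+ */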
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+struct qed_mcp_info {
+ struct mutex mutex; /* MCP access lock */
+ u32 public_base;
+ u32 drv_mb_addr;
+ u32 mfw_mb_addr;
+ u32 port_addr;
+ u16 drv_mb_seq;
+ u16 drv_pulse_seq;
+ struct qed_mcp_link_params link_input;
+ struct qed_mcp_link_state link_output;
+ struct qed_mcp_link_capabilities link_capabilities;
+ struct qed_mcp_function_info func_info;
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u16 mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ *
+ * @return int
+ */
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event is
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW and, in case the operation
+ *        succeeds, returns whether this PF is the first on the
+ *        chip/engine/port or function. This function should be
+ *        called when the driver is ready to accept MFW events,
+ *        after Storm initialization is done.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_load_code - The MCP response param containing one
+ * of the following:
+ * FW_MSG_CODE_DRV_LOAD_ENGINE
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - called during init to read all function-related info from shmem.
+ *
+ * @param p_hwfn
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644
index 000000000000..7a5ce5914ace
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -0,0 +1,366 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xff << 24)
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_FCOE \
+ 0x1f0408UL
+#define PRS_REG_SEARCH_ROCE \
+ 0x1f040cUL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644
index 000000000000..31a1f1eb4f56
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -0,0 +1,360 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+ QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ QED_SPQ_MODE_CB, /* Client supplies a callback */
+ QED_SPQ_MODE_EBLOCK, /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+ void (*function)(struct qed_hwfn *,
+ void *,
+ union event_ring_data *,
+ u8 fw_return_code);
+ void *cookie;
+};
+
+/**
+ * @brief qed_eth_cqe_completion - handles the completion of a
+ * ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return int
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+
+/**
+ * @file
+ *
+ * QED Slow-hwfn queue interface
+ */
+
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct vport_update_ramrod_data vport_update;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ QED_SPQ_PRIORITY_NORMAL,
+ QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+ struct qed_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+struct qed_spq_comp_done {
+ u64 done;
+ u8 fw_return_code;
+};
+
+struct qed_spq_entry {
+ struct list_head list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ struct list_head *queue;
+
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb comp_cb;
+ struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct qed_eq {
+ struct qed_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct qed_consq {
+ struct qed_chain chain;
+};
+
+struct qed_spq {
+ spinlock_t lock; /* SPQ lock */
+
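+ /* Entry lifecycle: free_pool -> pending -> completion_pending.
+ * unlimited_pending holds overflow entries allocated when the
+ * free_pool is empty; see qed_spq_get_entry() and qed_spq_pend_post().
+ */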
+ struct list_head unlimited_pending;
+ struct list_head pending;
+ struct list_head completion_pending;
+ struct list_head free_pool;
+
+ struct qed_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct qed_spq_entry *p_virt;
+
+ /* Used as index for completions (returns on EQ by FW) */
+ u16 echo_idx;
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+};
+
+/**
+ * @brief qed_spq_post - Posts a Slow hwfn request to FW, or pends
+ *        it for a later post if no room is currently available.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return int
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * @brief qed_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_get_entry - Obtain an entry from the spq
+ * free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return int
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent);
+
+/**
+ * @brief qed_spq_return_entry - Return an entry to spq free
+ * pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+
+/**
+ * @brief qed_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem);
+
+/**
+ * @brief qed_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_free - Deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod);
+
+/**
+ * @brief qed_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return int
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * @brief qed_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param fw_return_code
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return int
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_alloc - Allocates & initializes a ConsQ
+ * struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_consq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_setup - Reset the ConsQ to its start
+ * state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @brief qed_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq);
+
+/**
+ * @file
+ *
+ * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+ */
+
+#define QED_SP_EQ_COMPLETION 0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_request_params {
+ size_t ramrod_data_size;
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb *p_comp_data;
+};
+
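+/**
+ * @brief qed_sp_init_request - Acquires an SPQ entry and initializes
+ *        its ramrod header [cid, cmd, protocol] and completion mode
+ *        according to p_params.
+ *
+ * @return int
+ */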
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params);
+
+/**
+ * @brief qed_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param mode
+ *
+ * @return int
+ */
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode);
+
+/**
+ * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PFs Event Ring. This ramrod also
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644
index 000000000000..6f7879136633
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -0,0 +1,170 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u32 cid,
+ u16 opaque_fid,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_request_params *p_params)
+{
+ int rc = -EINVAL;
+ struct qed_spq_entry *p_ent = NULL;
+ u32 opaque_cid = opaque_fid << 16 | cid;
+
+ if (!pp_ent)
+ return -ENOMEM;
+
+ rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+ if (rc != 0)
+ return rc;
+
+ p_ent = *pp_ent;
+
+ p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+
+ p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_params->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
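+ /* EBLOCK polls comp_done directly; see qed_spq_block() */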
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case QED_SPQ_MODE_BLOCK:
+ if (!p_params->p_comp_data)
+ return -EINVAL;
+
+ p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+ break;
+
+ case QED_SPQ_MODE_CB:
+ if (!p_params->p_comp_data)
+ p_ent->comp_cb.function = NULL;
+ else
+ p_ent->comp_cb = *p_params->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+ opaque_cid, cmd, protocol,
+ (unsigned long)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+ if (p_params->ramrod_data_size)
+ memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+ return 0;
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ enum mf_mode mode)
+{
+ struct qed_sp_init_request_params params;
+ struct pf_start_ramrod_data *p_ramrod = NULL;
+ u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ /* update initial eq producer */
+ qed_eq_prod_update(p_hwfn,
+ qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ memset(&params, 0, sizeof(params));
+ params.ramrod_data_size = sizeof(*p_ramrod);
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn,
+ &p_ent,
+ qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.pf_start;
+
+ p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = QED_PATH_ID(p_hwfn);
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = cpu_to_le16(0xf);
+ p_ramrod->mf_mode = mode;
+ p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ /* Place EQ address in RAMROD */
+ p_ramrod->event_ring_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
+
+ p_ramrod->consolid_q_pbl_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_ramrod->consolid_q_pbl_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+ p_hwfn->hw_info.personality = PERSONALITY_ETH;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+ sb, sb_index,
+ (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
+ p_ramrod->outer_tag);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sp_init_request_params params;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = -EINVAL;
+
+ memset(&params, 0, sizeof(params));
+ params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+ p_hwfn->hw_info.opaque_fid,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &params);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644
index 000000000000..7c0b8459666e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/***************************************************************************
+* Structures & Definitions
+***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+#define SPQ_BLOCK_SLEEP_LENGTH (1000)
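+/* With usleep_range(5000, 10000) per iteration in qed_spq_block(),
+ * SPQ_BLOCK_SLEEP_LENGTH == 1000 bounds each blocking attempt to
+ * roughly 5-10 seconds.
+ */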
+
+/***************************************************************************
+* Blocking Imp. (BLOCK/EBLOCK mode)
+***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_spq_comp_done *comp_done;
+
+ comp_done = (struct qed_spq_comp_done *)cookie;
+
+ comp_done->done = 0x1;
+ comp_done->fw_return_code = fw_return_code;
+
+ /* make update visible to waiting thread */
+ smp_wmb();
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret)
+{
+ int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ struct qed_spq_comp_done *comp_done;
+ int rc;
+
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != 0)
+ DP_NOTICE(p_hwfn, "MCP drain failed\n");
+
+ /* Retry after drain */
+ sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ while (sleep_count) {
+ /* validate we receive completion update */
+ smp_rmb();
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+ usleep_range(5000, 10000);
+ sleep_count--;
+ }
+
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+
+ DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+
+ return -EBUSY;
+}
+
+/***************************************************************************
+* SPQ entries inner API
+***************************************************************************/
+static int
+qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ p_ent->elem.hdr.echo = 0;
+ p_hwfn->p_spq->echo_idx++;
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ case QED_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = qed_spq_blocking_cb;
+ break;
+ case QED_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ p_ent->elem.hdr.cid,
+ p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi,
+ p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return 0;
+}
+
+/***************************************************************************
+* HSI access
+***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq)
+{
+ u16 pq;
+ struct qed_cxt_info cxt_info;
+ struct core_conn_context *p_cxt;
+ union qed_qm_pq_params pq_params;
+ int rc;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+ /* QM physical queue */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+
+ p_cxt->xstorm_st_context.spq_base_lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+ p_cxt->xstorm_st_context.consolid_base_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.consolid_base_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq,
+ struct qed_spq_entry *p_ent)
+{
+ struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct slow_path_element *elem;
+ struct core_db_data db;
+
+ elem = qed_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+ return -EINVAL;
+ }
+
+ *elem = p_ent->elem; /* struct assignment */
+
+ /* send a doorbell on the slow hwfn session */
+ memset(&db, 0, sizeof(db));
+ SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* validate producer is up to date */
+ rmb();
+
+ db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+ /* do not reorder */
+ barrier();
+
+ DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+ /* make sure the doorbell is rung */
+ mmiowb();
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+ qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
+ p_spq->cid, db.params, db.agg_flags,
+ qed_chain_get_prod_idx(p_chain));
+
+ return 0;
+}
+
+/***************************************************************************
+* Asynchronous events
+***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+}
+
+/***************************************************************************
+* EQ API
+***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod)
+{
+ u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ mmiowb();
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie)
+{
+ struct qed_eq *p_eq = cookie;
+ struct qed_chain *p_chain = &p_eq->chain;
+ int rc = 0;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+ /* Need to guarantee the fw_cons index we use points to a usable
+ * element (to comply with our chain), so our macros would comply
+ */
+ if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+ qed_chain_get_usable_per_page(p_chain))
+ fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+ if (!p_eqe) {
+ rc = -EINVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode,
+ p_eqe->protocol_id,
+ p_eqe->reserved0,
+ le16_to_cpu(p_eqe->echo),
+ p_eqe->fw_return_code,
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (qed_async_event_completion(p_hwfn, p_eqe))
+ rc = -EINVAL;
+ } else if (qed_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = -EINVAL;
+ }
+
+ qed_chain_recycle_consumed(p_chain);
+ }
+
+ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+ return rc;
+}
+
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+ u16 num_elem)
+{
+ struct qed_eq *p_eq;
+
+ /* Allocate EQ struct */
+ p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+ if (!p_eq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+ return NULL;
+ }
+
+ /* Allocate and initialize EQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ num_elem,
+ sizeof(union event_ring_element),
+ &p_eq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ qed_int_register_cb(p_hwfn,
+ qed_eq_completion,
+ p_eq,
+ &p_eq->eq_sb_index,
+ &p_eq->p_fw_cons);
+
+ return p_eq;
+
+eq_allocate_fail:
+ qed_eq_free(p_hwfn, p_eq);
+ return NULL;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ qed_chain_reset(&p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+ struct qed_eq *p_eq)
+{
+ if (!p_eq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_eq->chain);
+ kfree(p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static int qed_cqe_completion(
+ struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
+{
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ int rc;
+
+ rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+
+ return rc;
+}
+
+/***************************************************************************
+* Slow hwfn Queue (spq)
+***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ dma_addr_t p_phys = 0;
+ unsigned int i = 0;
+
+ INIT_LIST_HEAD(&p_spq->pending);
+ INIT_LIST_HEAD(&p_spq->completion_pending);
+ INIT_LIST_HEAD(&p_spq->free_pool);
+ INIT_LIST_HEAD(&p_spq->unlimited_pending);
+ spin_lock_init(&p_spq->lock);
+
+ /* SPQ empty pool */
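+ /* Each entry's elem.data_ptr is set to the DMA address of its own
+ * embedded ramrod data, hence the offsetof() below.
+ */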
+ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
+
+ for (i = 0; i < p_spq->chain.capacity; i++) {
+ p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+ p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+ list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct qed_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+ p_spq->echo_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ qed_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ qed_chain_reset(&p_spq->chain);
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ struct qed_spq_entry *p_virt = NULL;
+
+ /* SPQ struct */
+ p_spq = kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+ if (!p_spq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+ return -ENOMEM;
+ }
+
+ /* SPQ ring */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_SINGLE,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain)) {
+ DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+ goto spq_allocate_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ &p_phys,
+ GFP_KERNEL);
+
+ if (!p_virt)
+ goto spq_allocate_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+ p_hwfn->p_spq = p_spq;
+
+ return 0;
+
+spq_allocate_fail:
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+ return -ENOMEM;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (!p_spq)
+ return;
+
+ if (p_spq->p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_spq->chain.capacity *
+ sizeof(struct qed_spq_entry),
+ p_spq->p_virt,
+ p_spq->p_phys);
+
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+}
+
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+
+ spin_lock_bh(&p_spq->lock);
+
+ if (list_empty(&p_spq->free_pool)) {
+ p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+ if (!p_ent) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_ent->list);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+out_unlock:
+ spin_unlock_bh(&p_spq->lock);
+ return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief qed_spq_add_entry - Adds a new entry to the pending
+ *        list. Should be called while the SPQ lock is held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return int
+ */
+static int
+qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ struct qed_spq_entry *p_en2;
+
+ if (list_empty(&p_spq->free_pool)) {
+ list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return 0;
+ }
+
+ p_en2 = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_en2->list);
+
+ /* Struct assignment */
+ *p_en2 = *p_ent;
+
+ kfree(p_ent);
+
+ p_ent = p_en2;
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case QED_SPQ_PRIORITY_NORMAL:
+ list_add_tail(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case QED_SPQ_PRIORITY_HIGH:
+ list_add(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/***************************************************************************
+* Accessor
+***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+* Posting new Ramrods
+***************************************************************************/
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+ struct list_head *head,
+ u32 keep_reserve)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ int rc;
+
+ while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !list_empty(head)) {
+ struct qed_spq_entry *p_ent =
+ list_first_entry(head, struct qed_spq_entry, list);
+ list_del(&p_ent->list);
+ list_add_tail(&p_ent->list, &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ list_del(&p_ent->list);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+
+ while (!list_empty(&p_spq->free_pool)) {
+ if (list_empty(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = list_first_entry(&p_spq->unlimited_pending,
+ struct qed_spq_entry,
+ list);
+ if (!p_ent)
+ return -EINVAL;
+
+ list_del(&p_ent->list);
+
+ qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ return qed_spq_post_list(p_hwfn, &p_spq->pending,
+ SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ int rc = 0;
+ struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+ bool b_ret_ent = true;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ /* Complete the entry */
+ rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+ spin_lock_bh(&p_spq->lock);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Add the request to the pending queue */
+ rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = qed_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+		 * dealt with; no need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ spin_unlock_bh(&p_spq->lock);
+
+ if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ /* For entries in QED BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer perform the cleanup here.
+ */
+ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ qed_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ spin_lock_bh(&p_spq->lock);
+ list_del(&p_ent->list);
+ qed_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
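+
+/* Illustrative caller sketch (not part of this patch; the exact entry
+ * setup is protocol specific and done by this file's callers):
+ *
+ *	struct qed_spq_entry *p_ent;
+ *	u8 fw_ret;
+ *
+ *	if (!qed_spq_get_entry(p_hwfn, &p_ent)) {
+ *		p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
+ *		p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
+ *		... fill p_ent->elem with the ramrod data ...
+ *		qed_spq_post(p_hwfn, p_ent, &fw_ret);
+ *	}
+ */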
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct qed_spq *p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_spq_entry *tmp;
+ struct qed_spq_entry *found = NULL;
+ int rc;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return -EINVAL;
+
+ spin_lock_bh(&p_spq->lock);
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
+ list) {
+ if (p_ent->elem.hdr.echo == echo) {
+ list_del(&p_ent->list);
+
+ qed_chain_return_produced(&p_spq->chain);
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ spin_unlock_bh(&p_spq->lock);
+
+ if (!found) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to find an entry that this EQE completes\n");
+ return -EEXIST;
+ }
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
+ p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+ /* EBLOCK is responsible for freeing its own entry */
+ qed_spq_return_entry(p_hwfn, found);
+
+ /* Attempt to post pending requests */
+ spin_lock_bh(&p_spq->lock);
+ rc = qed_spq_pend_post(p_hwfn);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_consq *p_consq;
+
+ /* Allocate ConsQ struct */
+ p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+ if (!p_consq) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+ return NULL;
+ }
+
+	/* Allocate and initialize ConsQ chain */
+ if (qed_chain_alloc(p_hwfn->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_PAGE_SIZE / 0x80,
+ 0x80,
+ &p_consq->chain)) {
+		DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
+ goto consq_allocate_fail;
+ }
+
+ return p_consq;
+
+consq_allocate_fail:
+ qed_consq_free(p_hwfn, p_consq);
+ return NULL;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ qed_chain_reset(&p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+ struct qed_consq *p_consq)
+{
+ if (!p_consq)
+ return;
+ qed_chain_free(p_hwfn->cdev, &p_consq->chain);
+ kfree(p_consq);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644
index 000000000000..06ff90d87572
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644
index 000000000000..ea00d5f3bab4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -0,0 +1,285 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#define QEDE_MAJOR_VERSION 8
+#define QEDE_MINOR_VERSION 4
+#define QEDE_REVISION_VERSION 0
+#define QEDE_ENGINEERING_VERSION 0
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
+ __stringify(QEDE_MINOR_VERSION) "." \
+ __stringify(QEDE_REVISION_VERSION) "." \
+ __stringify(QEDE_ENGINEERING_VERSION)
+
+#define QEDE_ETH_INTERFACE_VERSION 300
+
+#define DRV_MODULE_SYM qede
+
+struct qede_stats {
+ u64 no_buff_discards;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 coalesced_pkts;
+ u64 coalesced_events;
+ u64 coalesced_aborts_num;
+ u64 non_coalesced_pkts;
+ u64 coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_127_byte_packets;
+ u64 rx_255_byte_packets;
+ u64 rx_511_byte_packets;
+ u64 rx_1023_byte_packets;
+ u64 rx_1518_byte_packets;
+ u64 rx_1522_byte_packets;
+ u64 rx_2047_byte_packets;
+ u64 rx_4095_byte_packets;
+ u64 rx_9216_byte_packets;
+ u64 rx_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_dev {
+ struct qed_dev *cdev;
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+
+ u32 dp_module;
+ u8 dp_level;
+
+ const struct qed_eth_ops *ops;
+
+ struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+ (edev)->dev_info.num_tc)
+
+ struct qede_fastpath *fp_array;
+ u16 num_rss;
+ u8 num_tc;
+#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
+#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \
+ (edev)->num_tc)
+#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss)
+#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss)
+#define QEDE_TX_QUEUE(edev, txqidx) \
+ (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+ (edev), (txqidx))])
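+/* e.g. with num_rss = 4 and num_tc = 2, txqidx 6 maps to
+ * TSS index 6 % 4 = 2 and TC index 6 / 4 = 1.
+ */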
+
+ struct qed_int_info int_info;
+ unsigned char primary_mac[ETH_ALEN];
+
+	/* Smaller private variant of the RTNL lock */
+ struct mutex qede_lock;
+ u32 state; /* Protected by qede_lock */
+ u16 rx_buf_size;
+ /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+	/* Max supported alignment is 256 (shift of 8); a minimal alignment
+	 * shift of 6 is optimal for 57xxx HW performance
+	 */
+#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
+	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define QEDE_FW_RX_ALIGN_END \
+ max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+ struct qede_stats stats;
+ struct qed_update_vport_rss_params rss_params;
+ u16 q_num_rx_buffers; /* Must be a power of two */
+ u16 q_num_tx_buffers; /* Must be a power of two */
+
+ struct delayed_work sp_task;
+ unsigned long sp_flags;
+};
+
+enum QEDE_STATE {
+ QEDE_STATE_CLOSED,
+ QEDE_STATE_OPEN,
+};
+
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
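+/* e.g. HILO_U64(0x00000001, 0x00000002) == 0x0000000100000002ULL */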
+
+#define MAX_NUM_TC 8
+#define MAX_NUM_PRI 8
+
+/* The driver supports the new build_skb() API:
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skbs are built only after the frame has been DMA-ed.
+ */
+struct sw_rx_data {
+ u8 *data;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct qede_rx_queue {
+ __le16 *hw_cons_ptr;
+ struct sw_rx_data *sw_rx_ring;
+ u16 sw_rx_cons;
+ u16 sw_rx_prod;
+ struct qed_chain rx_bd_ring;
+ struct qed_chain rx_comp_ring;
+ void __iomem *hw_rxq_prod_addr;
+
+ int rx_buf_size;
+
+ u16 num_rx_buffers;
+ u16 rxq_id;
+
+ u64 rx_hw_errors;
+ u64 rx_alloc_errors;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ u32 raw;
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD BIT(0)
+};
+
+struct qede_tx_queue {
+ int index; /* Queue index */
+ __le16 *hw_cons_ptr;
+ struct sw_tx_bd *sw_tx_ring;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ struct qed_chain tx_pbl;
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+
+ u16 num_tx_buffers;
+};
+
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
+ le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
+ (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
+ (bd)->nbytes = cpu_to_le16(len); \
+ } while (0)
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
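+/* Illustrative use of the BD macros, mirroring this driver's TX path:
+ *
+ *	mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ *				 skb_headlen(skb), DMA_TO_DEVICE);
+ *	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+ *	...
+ *	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ *		       BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ */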
+
+struct qede_fastpath {
+ struct qede_dev *edev;
+ u8 rss_id;
+ struct napi_struct napi;
+ struct qed_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txqs;
+
+#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+ char name[VEC_NAME_SIZE];
+};
+
+/* Debug print definitions */
+#define DP_NAME(edev) ((edev)->ndev->name)
+
+#define XMIT_PLAIN 0
+#define XMIT_L4_CSUM BIT(0)
+#define XMIT_LSO BIT(1)
+#define XMIT_ENC BIT(2)
+
+#define QEDE_CSUM_ERROR BIT(0)
+#define QEDE_CSUM_UNNECESSARY BIT(1)
+
+#define QEDE_SP_RX_MODE 1
+
+union qede_reload_args {
+ u16 mtu;
+};
+
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *edev,
+ union qede_reload_args *args),
+ union qede_reload_args *args);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+
+#define RX_RING_SIZE_POW 13
+#define RX_RING_SIZE BIT(RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+
+#define TX_RING_SIZE_POW 13
+#define TX_RING_SIZE BIT(TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
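+/* e.g. with a ring-size power of 13, each ring holds 8192 BDs and the
+ * sw producer/consumer indices wrap via '& NUM_{RX,TX}_BDS_MAX' (8191).
+ */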
+
+#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+
+#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644
index 000000000000..3a362476a22c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -0,0 +1,385 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include "qede.h"
+
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+ {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
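+/* e.g. QEDE_PF_STAT(rx_crc_errors) expands to
+ * { offsetof(struct qede_stats, rx_crc_errors), "rx_crc_errors", true }
+ */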
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rx_hw_errors),
+ QEDE_RQSTAT(rx_alloc_errors),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
+ (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
+ qede_rqstats_arr[(sindex)].offset)))
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+ bool pf_only;
+} qede_stats_arr[] = {
+ QEDE_STAT(rx_ucast_bytes),
+ QEDE_STAT(rx_mcast_bytes),
+ QEDE_STAT(rx_bcast_bytes),
+ QEDE_STAT(rx_ucast_pkts),
+ QEDE_STAT(rx_mcast_pkts),
+ QEDE_STAT(rx_bcast_pkts),
+
+ QEDE_STAT(tx_ucast_bytes),
+ QEDE_STAT(tx_mcast_bytes),
+ QEDE_STAT(tx_bcast_bytes),
+ QEDE_STAT(tx_ucast_pkts),
+ QEDE_STAT(tx_mcast_pkts),
+ QEDE_STAT(tx_bcast_pkts),
+
+ QEDE_PF_STAT(rx_64_byte_packets),
+ QEDE_PF_STAT(rx_127_byte_packets),
+ QEDE_PF_STAT(rx_255_byte_packets),
+ QEDE_PF_STAT(rx_511_byte_packets),
+ QEDE_PF_STAT(rx_1023_byte_packets),
+ QEDE_PF_STAT(rx_1518_byte_packets),
+ QEDE_PF_STAT(rx_1522_byte_packets),
+ QEDE_PF_STAT(rx_2047_byte_packets),
+ QEDE_PF_STAT(rx_4095_byte_packets),
+ QEDE_PF_STAT(rx_9216_byte_packets),
+ QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(tx_64_byte_packets),
+ QEDE_PF_STAT(tx_65_to_127_byte_packets),
+ QEDE_PF_STAT(tx_128_to_255_byte_packets),
+ QEDE_PF_STAT(tx_256_to_511_byte_packets),
+ QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
+
+ QEDE_PF_STAT(rx_mac_crtl_frames),
+ QEDE_PF_STAT(tx_mac_ctrl_frames),
+ QEDE_PF_STAT(rx_pause_frames),
+ QEDE_PF_STAT(tx_pause_frames),
+ QEDE_PF_STAT(rx_pfc_frames),
+ QEDE_PF_STAT(tx_pfc_frames),
+
+ QEDE_PF_STAT(rx_crc_errors),
+ QEDE_PF_STAT(rx_align_errors),
+ QEDE_PF_STAT(rx_carrier_errors),
+ QEDE_PF_STAT(rx_oversize_packets),
+ QEDE_PF_STAT(rx_jabbers),
+ QEDE_PF_STAT(rx_undersize_packets),
+ QEDE_PF_STAT(rx_fragments),
+ QEDE_PF_STAT(tx_lpi_entry_count),
+ QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_STAT(brb_truncates),
+ QEDE_PF_STAT(brb_discards),
+ QEDE_STAT(no_buff_discards),
+ QEDE_PF_STAT(mftag_filter_discards),
+ QEDE_PF_STAT(mac_filter_discards),
+ QEDE_STAT(tx_err_drop_pkts),
+
+ QEDE_STAT(coalesced_pkts),
+ QEDE_STAT(coalesced_events),
+ QEDE_STAT(coalesced_aborts_num),
+ QEDE_STAT(non_coalesced_pkts),
+ QEDE_STAT(coalesced_bytes),
+};
+
+#define QEDE_STATS_DATA(dev, index) \
+ (*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
+ + qede_stats_arr[(index)].offset)))
+
+#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+ int i, j, k;
+
+ for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_stats_arr[i].string);
+ j++;
+ }
+
+ for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
+ strcpy(buf + j * ETH_GSTRING_LEN,
+ qede_rqstats_arr[k].string);
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ qede_get_strings_stats(edev, buf);
+ break;
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ }
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int sidx, cnt = 0;
+ int qid;
+
+ qede_fill_by_demand_stats(edev);
+
+ mutex_lock(&edev->qede_lock);
+
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+
+ for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
+ buf[cnt] = 0;
+ for (qid = 0; qid < edev->num_rss; qid++)
+ buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
+ cnt++;
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int num_stats = QEDE_NUM_STATS;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return num_stats + QEDE_NUM_RQSTATS;
+
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ return -EINVAL;
+ }
+}
+
+static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ cmd->supported = current_link.supported_caps;
+ cmd->advertising = current_link.advertised_caps;
+ if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+ ethtool_cmd_speed_set(cmd, current_link.speed);
+ cmd->duplex = current_link.duplex;
+ } else {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ }
+ cmd->port = current_link.port;
+ cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+ cmd->lp_advertising = current_link.lp_caps;
+
+ return 0;
+}
+
+static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+ u32 speed;
+
+ if (edev->dev_info.common.is_mf) {
+ DP_INFO(edev,
+ "Link parameters can not be changed in MF mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&current_link, 0, sizeof(current_link));
+ memset(&params, 0, sizeof(params));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ speed = ethtool_cmd_speed(cmd);
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ params.autoneg = true;
+ params.forced_speed = 0;
+ params.adv_speeds = cmd->advertising;
+ } else { /* forced speed */
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+ params.autoneg = false;
+ params.forced_speed = speed;
+ switch (speed) {
+ case SPEED_10000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_10000baseKR_Full)) {
+ DP_INFO(edev, "10G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_10000baseKR_Full;
+ break;
+ case SPEED_40000:
+ if (!(current_link.supported_caps &
+ SUPPORTED_40000baseLR4_Full)) {
+ DP_INFO(edev, "40G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+ break;
+ default:
+ DP_INFO(edev, "Unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+ }
+
+ params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ strlcpy(info->driver, "qede", sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+ snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ edev->dev_info.common.fw_major,
+ edev->dev_info.common.fw_minor,
+ edev->dev_info.common.fw_rev,
+ edev->dev_info.common.fw_eng);
+
+ snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+ edev->dev_info.common.mfw_rev & 0xFF);
+
+ if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
+ sizeof(info->fw_version)) {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "mfw %s storm %s", mfw, storm);
+ } else {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%s %s", mfw, storm);
+ }
+
+ strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
+ edev->dp_module;
+}
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(level, &dp_module, &dp_level);
+
+ edev->dp_level = dp_level;
+ edev->dp_module = dp_module;
+ edev->ops->common->update_msglvl(edev->cdev,
+ dp_module, dp_level);
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ return current_link.link_up;
+}
+
+static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+{
+ edev->ndev->mtu = args->mtu;
+}
+
+/* Netdevice NDOs */
+#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+#define ETH_MIN_PACKET_SIZE 60
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ union qede_reload_args args;
+
+ if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+ DP_ERR(edev, "Can't support requested MTU size\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Configuring MTU size of %d\n", new_mtu);
+
+	/* Set the mtu field and restart the interface if needed */
+ args.mtu = new_mtu;
+
+ if (netif_running(edev->ndev))
+ qede_reload(edev, &qede_update_mtu, &args);
+
+ qede_update_mtu(edev, &args);
+
+ return 0;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .set_settings = qede_set_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_sset_count = qede_get_sset_count,
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+ dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644
index 000000000000..f4657a2e730a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -0,0 +1,2584 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/vxlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+
+#include "qede.h"
+
+static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
+ DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_10 0x1635
+#define CHIP_NUM_57980S_MF 0x1636
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_57980S_25 0x1656
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#endif
+
+static const struct pci_device_id qede_pci_tbl[] = {
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
+ { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+
+#define TX_TIMEOUT (5 * HZ)
+
+static void qede_remove(struct pci_dev *pdev);
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+
+static struct pci_driver qede_pci_driver = {
+ .name = "qede",
+ .id_table = qede_pci_tbl,
+ .probe = qede_probe,
+ .remove = qede_remove,
+};
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+ {
+ .link_update = qede_link_update,
+ },
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_drvinfo drvinfo;
+ struct qede_dev *edev;
+
+ /* Currently only support name change */
+ if (event != NETDEV_CHANGENAME)
+ goto done;
+
+ /* Check whether this is a qede device */
+ if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+ goto done;
+
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+ if (strcmp(drvinfo.driver, "qede"))
+ goto done;
+ edev = netdev_priv(ndev);
+
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_id(edev->cdev, edev->ndev->name,
+ "qede");
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+ .notifier_call = qede_netdev_event,
+};
+
+static int __init qede_init(void)
+{
+ int ret;
+ u32 qed_ver;
+
+ pr_notice("qede_init: %s\n", version);
+
+ qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+ if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
+ pr_notice("Version mismatch [%08x != %08x]\n",
+ qed_ver,
+ QEDE_ETH_INTERFACE_VERSION);
+ return -EINVAL;
+ }
+
+ qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ if (!qed_ops) {
+ pr_notice("Failed to get qed ethtool operations\n");
+ return -EINVAL;
+ }
+
+ /* Must register notifier before pci ops, since we might miss
+	 * interface rename after pci probe and netdev registration.
+ */
+ ret = register_netdevice_notifier(&qede_netdev_notifier);
+ if (ret) {
+ pr_notice("Failed to register netdevice_notifier\n");
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ ret = pci_register_driver(&qede_pci_driver);
+ if (ret) {
+ pr_notice("Failed to register driver\n");
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+ pr_notice("qede_cleanup called\n");
+
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ pci_unregister_driver(&qede_pci_driver);
+ qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+/* -------------------------------------------------------------------------
+ * START OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+/* Unmap the data and free skb */
+static int qede_free_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ int *len)
+{
+ u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_bd *tx_data_bd;
+ int bds_consumed = 0;
+ int nbds;
+ bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+ int i, split_bd_len = 0;
+
+ if (unlikely(!skb)) {
+ DP_ERR(edev,
+ "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+ idx, txq->sw_tx_cons, txq->sw_tx_prod);
+ return -1;
+ }
+
+ *len = skb->len;
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ bds_consumed++;
+
+ nbds = first_bd->data.nbds;
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ bds_consumed++;
+ }
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ while (bds_consumed++ < nbds)
+ qed_chain_consume(&txq->tx_pbl);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+
+ return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *first_bd,
+ int nbd,
+ bool data_split)
+{
+ u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+ struct eth_tx_bd *tx_data_bd;
+ int i, split_bd_len = 0;
+
+ /* Return prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ nbd--;
+ }
+
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < nbd; i++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ if (tx_data_bd->nbytes)
+ dma_unmap_page(&edev->pdev->dev,
+ BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+	/* Return the prod again to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod),
+ first_bd);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring[idx].skb = NULL;
+ txq->sw_tx_ring[idx].flags = 0;
+}
+
+static u32 qede_xmit_type(struct qede_dev *edev,
+ struct sk_buff *skb,
+ int *ipv6_ext)
+{
+ u32 rc = XMIT_L4_CSUM;
+ __be16 l3_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return XMIT_PLAIN;
+
+ l3_proto = vlan_get_protocol(skb);
+ if (l3_proto == htons(ETH_P_IPV6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *ipv6_ext = 1;
+
+ if (skb_is_gso(skb))
+ rc |= XMIT_LSO;
+
+ return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+ struct eth_tx_2nd_bd *second_bd,
+ struct eth_tx_3rd_bd *third_bd)
+{
+ u8 l4_proto;
+ u16 bd2_bits = 0, bd2_bits2 = 0;
+
+ bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+ bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+ bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ else
+ l4_proto = ip_hdr(skb)->protocol;
+
+ if (l4_proto == IPPROTO_UDP)
+ bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+ if (third_bd) {
+ third_bd->data.bitfields |=
+ ((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
+ }
+
+ second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+ second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_dev *edev,
+ skb_frag_t *frag,
+ struct eth_tx_bd *bd)
+{
+ dma_addr_t mapping;
+
+ /* Map skb non-linear frag data for DMA */
+ mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+ return -ENOMEM;
+ }
+
+ /* Setup the data pointer of the frag data */
+ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+ return 0;
+}
+
+/* Main transmit function */
+static
+netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct netdev_queue *netdev_txq;
+ struct qede_tx_queue *txq;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_2nd_bd *second_bd = NULL;
+ struct eth_tx_3rd_bd *third_bd = NULL;
+ struct eth_tx_bd *tx_data_bd = NULL;
+ u16 txq_index;
+ u8 nbd = 0;
+ dma_addr_t mapping;
+ int rc, frag_idx = 0, ipv6_ext = 0;
+ u8 xmit_type;
+ u16 idx;
+ u16 hlen;
+	bool data_split = false;
+
+ /* Get tx-queue context and netdev index */
+ txq_index = skb_get_queue_mapping(skb);
+ WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+ txq = QEDE_TX_QUEUE(edev, txq_index);
+ netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+	/* Current code doesn't support SKB linearization, since the max
+	 * number of skb frags fits within what the FW HSI can describe.
+	 */
+ BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
+
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
+ (MAX_SKB_FRAGS + 1));
+
+ xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = (struct eth_tx_1st_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ return NETDEV_TX_OK;
+ }
+ nbd++;
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* In case there is IPv6 with extension headers or LSO we need 2nd and
+ * 3rd BDs.
+ */
+ if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+ second_bd = (struct eth_tx_2nd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(second_bd, 0, sizeof(*second_bd));
+
+ nbd++;
+ third_bd = (struct eth_tx_3rd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(third_bd, 0, sizeof(*third_bd));
+
+ nbd++;
+ /* We need to fill in additional data in second_bd... */
+ tx_data_bd = (struct eth_tx_bd *)second_bd;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Fill the parsing flags & params according to the requested offload */
+ if (xmit_type & XMIT_L4_CSUM) {
+ /* We don't re-calculate IP checksum as it is already done by
+ * the upper stack
+ */
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+ /* If the packet is IPv6 with extension header, indicate that
+ * to FW and pass few params, since the device cracker doesn't
+ * support parsing IPv6 with extension header/s.
+ */
+ if (unlikely(ipv6_ext))
+ qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+ }
+
+ if (xmit_type & XMIT_LSO) {
+ first_bd->data.bd_flags.bitfields |=
+ (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+ third_bd->data.lso_mss =
+ cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data;
+
+		/* @@@TBD - if this is not removed, it needs to be checked */
+ third_bd->data.bitfields |=
+ (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+ /* Make life easier for FW guys who can't deal with header and
+ * data on same BD. If we need to split, use the second bd...
+ */
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "TSO split header size is %d (%x:%x)\n",
+ first_bd->nbytes, first_bd->addr.hi,
+ first_bd->addr.lo);
+
+ mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+ le32_to_cpu(first_bd->addr.lo)) +
+ hlen;
+
+ BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+ le16_to_cpu(first_bd->nbytes) -
+ hlen);
+
+ /* this marks the BD as one that has no
+ * individual mapping
+ */
+ txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+ first_bd->nbytes = cpu_to_le16(hlen);
+
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ data_split = true;
+ }
+ }
+
+ /* Handle fragmented skb */
+	/* Special handling for frags placed inside the 2nd and 3rd BDs */
+ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+
+ if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ else
+ tx_data_bd = NULL;
+
+ frag_idx++;
+ }
+
+ /* map last frags into 4th, 5th .... */
+ for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+
+ memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+ rc = map_frag_to_bd(edev,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+ data_split);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = nbd;
+
+ netdev_tx_sent_queue(netdev_txq, skb->len);
+
+ skb_tx_timestamp(skb);
+
+	/* Advance the packet producer only at this point, just before the
+	 * packet is actually sent, since mapping of pages may fail.
+	 */
+ txq->sw_tx_prod++;
+
+ /* 'next page' entries are counted in the producer value */
+ txq->tx_db.data.bd_prod =
+ cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+
+ if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+ < (MAX_SKB_FRAGS + 1))) {
+ netif_tx_stop_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Stop queue was called\n");
+ /* paired memory barrier is in qede_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons
+ */
+ smp_mb();
+
+ if (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1) &&
+ (edev->state == QEDE_STATE_OPEN)) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Wake queue was called\n");
+ }
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+ u16 hw_bd_cons;
+
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
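+	/* The chain consumer can lead the HW consumer by exactly one
+	 * (e.g. when sitting on a 'next page' element); treat that as
+	 * no pending work.
+	 */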
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+ return 0;
+
+ return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static int qede_tx_int(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+ u16 hw_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ int rc;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ int len = 0;
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+ hw_bd_cons,
+ qed_chain_get_cons_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+ /* Taking tx_lock is needed to prevent reenabling the queue
+		 * while it's empty. This could have happened if rx_action() gets
+ * suspended in qede_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(netdev_txq)) &&
+ (edev->state == QEDE_STATE_OPEN) &&
+ (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+ "Wake queue was called\n");
+ }
+
+ __netif_tx_unlock(netdev_txq);
+ }
+
+ return 0;
+}
+
+static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ return hw_comp_cons != sw_comp_cons;
+}
+
+static bool qede_has_tx_work(struct qede_fastpath *fp)
+{
+ u8 tc;
+
+ for (tc = 0; tc < fp->edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ return true;
+ return false;
+}
+
+/* This function copies the Rx buffer from the CONS position to the PROD
+ * position, since we failed to allocate a new Rx buffer.
+ */
+static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+{
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *sw_rx_data_cons =
+ &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ struct sw_rx_data *sw_rx_data_prod =
+ &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ dma_unmap_addr_set(sw_rx_data_prod, mapping,
+ dma_unmap_addr(sw_rx_data_cons, mapping));
+
+ sw_rx_data_prod->data = sw_rx_data_cons->data;
+ memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+
+ rxq->sw_rx_cons++;
+ rxq->sw_rx_prod++;
+}
+
+static inline void qede_update_rx_prod(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+ u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = {0};
+
+ /* Update producers */
+ rx_prods.bd_prod = cpu_to_le16(bd_prod);
+ rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (u32 *)&rx_prods);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the napi lock is released and another qede_poll is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
+static u32 qede_get_rxhash(struct qede_dev *edev,
+ u8 bitfields,
+ __le32 rss_hash,
+ enum pkt_hash_types *rxhash_type)
+{
+ enum rss_hash_type htype;
+
+ htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+
+ if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
+ *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ return le32_to_cpu(rss_hash);
+ }
+ *rxhash_type = PKT_HASH_TYPE_NONE;
+ return 0;
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+ skb_checksum_none_assert(skb);
+
+ if (csum_flag & QEDE_CSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ if (vlan_tag)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+
+ napi_gro_receive(&fp->napi, skb);
+}
+
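+/* Translate HW parsing/error flags into a QEDE_CSUM_* result: an IP
+ * header error, or an L4 checksum error when HW did calculate the L4
+ * checksum, yields QEDE_CSUM_ERROR; otherwise QEDE_CSUM_UNNECESSARY is
+ * returned when HW calculated the checksum, and 0 when it did not.
+ */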
+static u8 qede_check_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 csum = 0;
+
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_dev *edev = fp->edev;
+ struct qede_rx_queue *rxq = fp->rxq;
+
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
+ int rx_pkt = 0;
+ u8 csum_flag;
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	/* Memory barrier to prevent the CPU from doing speculative reads of
+	 * the CQE/BD in the while-loop before reading hw_comp_cons. Otherwise
+	 * the CPU could observe an updated hw_comp_cons yet still use a stale
+	 * CQE that was read before FW wrote it.
+	 */
+ rmb();
+
+ /* Loop to complete all indicated BDs */
+ while (sw_comp_cons != hw_comp_cons) {
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ enum pkt_hash_types rxhash_type;
+ enum eth_rx_cqe_type cqe_type;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ struct sk_buff *skb;
+ u16 len, pad;
+ u32 rx_hash;
+ u8 *data;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)
+ qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ edev->ops->eth_cqe_completion(
+ edev->cdev, fp->rss_id,
+ (struct eth_slow_path_rx_cqe *)cqe);
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ data = sw_rx_data->data;
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->pkt_len);
+ pad = fp_cqe->placement_offset;
+
+ /* For every Rx BD consumed, we allocate a new BD so the BD ring
+ * is always with a fixed size. If allocation fails, we take the
+ * consumed BD and return it to the ring in the PROD position.
+ * The packet that was received on that BD will be dropped (and
+ * not passed to the upper stack).
+ */
+ if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(sw_rx_data, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ /* If this is an error packet then drop it */
+ parse_flag =
+ le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
+ csum_flag = qede_check_csum(parse_flag);
+ if (csum_flag == QEDE_CSUM_ERROR) {
+ DP_NOTICE(edev,
+ "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ kfree(data);
+ goto next_rx;
+ }
+
+ skb = build_skb(data, 0);
+
+ if (unlikely(!skb)) {
+ DP_NOTICE(edev,
+ "Build_skb failed, dropping incoming packet\n");
+ kfree(data);
+ rxq->rx_alloc_errors++;
+ goto next_rx;
+ }
+
+ skb_reserve(skb, pad);
+
+ } else {
+ DP_NOTICE(edev,
+ "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
+ qede_reuse_rx_data(rxq);
+ rxq->rx_alloc_errors++;
+ goto next_cqe;
+ }
+
+ sw_rx_data->data = NULL;
+
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+
+ rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
+ fp_cqe->rss_hash,
+ &rxhash_type);
+
+ skb_set_hash(skb, rx_hash, rxhash_type);
+
+ qede_set_skb_csum(skb, csum_flag);
+
+ skb_record_rx_queue(skb, fp->rss_id);
+
+ qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+
+next_rx:
+ rxq->sw_rx_cons++;
+ rx_pkt++;
+
+next_cqe: /* don't consume bd rx buffer */
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+		/* CR TPA - revisit how to handle budget in TPA; perhaps
+		 * increase it on "end"
+		 */
+ if (rx_pkt == budget)
+ break;
+ } /* repeat while sw_comp_cons != hw_comp_cons... */
+
+ /* Update producers */
+ qede_update_rx_prod(edev, rxq);
+
+ return rx_pkt;
+}
+
+static int qede_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+ struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+ napi);
+ struct qede_dev *edev = fp->edev;
+
+ while (1) {
+ u8 tc;
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ if (qede_has_rx_work(fp->rxq)) {
+ work_done += qede_rx_int(fp, budget - work_done);
+
+ /* must not complete if we consumed full budget */
+ if (work_done >= budget)
+ break;
+ }
+
+ /* Fall out from the NAPI loop if needed */
+ if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
+
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ break;
+ }
+ }
+ }
+
+ return work_done;
+}
+
+static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct qede_fastpath *fp = fp_cookie;
+
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+ napi_schedule_irqoff(&fp->napi);
+ return IRQ_HANDLED;
+}
+
+/* -------------------------------------------------------------------------
+ * END OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+static int qede_set_mac_addr(struct net_device *ndev, void *p);
+static void qede_set_rx_mode(struct net_device *ndev);
+static void qede_config_rx_mode(struct net_device *ndev);
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char mac[ETH_ALEN])
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.mac_valid = 1;
+ ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+ struct qed_eth_stats stats;
+
+ edev->ops->get_vport_stats(edev->cdev, &stats);
+ edev->stats.no_buff_discards = stats.no_buff_discards;
+ edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+ edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+ edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+ edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+ edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+ edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+ edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+ edev->stats.mac_filter_discards = stats.mac_filter_discards;
+
+ edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+ edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+ edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+ edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+ edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+ edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+ edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+ edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+ edev->stats.coalesced_events = stats.tpa_coalesced_events;
+ edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+ edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+ edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+
+ edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
+ edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
+ edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
+ edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
+ edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
+ edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
+ edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
+ edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
+ edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
+ edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
+ edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_crc_errors = stats.rx_crc_errors;
+ edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
+ edev->stats.rx_pause_frames = stats.rx_pause_frames;
+ edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
+ edev->stats.rx_align_errors = stats.rx_align_errors;
+ edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
+ edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
+ edev->stats.rx_jabbers = stats.rx_jabbers;
+ edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
+ edev->stats.rx_fragments = stats.rx_fragments;
+ edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
+ edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
+ edev->stats.tx_128_to_255_byte_packets =
+ stats.tx_128_to_255_byte_packets;
+ edev->stats.tx_256_to_511_byte_packets =
+ stats.tx_256_to_511_byte_packets;
+ edev->stats.tx_512_to_1023_byte_packets =
+ stats.tx_512_to_1023_byte_packets;
+ edev->stats.tx_1024_to_1518_byte_packets =
+ stats.tx_1024_to_1518_byte_packets;
+ edev->stats.tx_1519_to_2047_byte_packets =
+ stats.tx_1519_to_2047_byte_packets;
+ edev->stats.tx_2048_to_4095_byte_packets =
+ stats.tx_2048_to_4095_byte_packets;
+ edev->stats.tx_4096_to_9216_byte_packets =
+ stats.tx_4096_to_9216_byte_packets;
+ edev->stats.tx_9217_to_16383_byte_packets =
+ stats.tx_9217_to_16383_byte_packets;
+ edev->stats.tx_pause_frames = stats.tx_pause_frames;
+ edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+ edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+ edev->stats.tx_total_collisions = stats.tx_total_collisions;
+ edev->stats.brb_truncates = stats.brb_truncates;
+ edev->stats.brb_discards = stats.brb_discards;
+ edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+}
+
+static struct rtnl_link_stats64 *qede_get_stats64(
+ struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ qede_fill_by_demand_stats(edev);
+
+ stats->rx_packets = edev->stats.rx_ucast_pkts +
+ edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+ stats->tx_packets = edev->stats.tx_ucast_pkts +
+ edev->stats.tx_mcast_pkts +
+ edev->stats.tx_bcast_pkts;
+
+ stats->rx_bytes = edev->stats.rx_ucast_bytes +
+ edev->stats.rx_mcast_bytes +
+ edev->stats.rx_bcast_bytes;
+
+ stats->tx_bytes = edev->stats.tx_ucast_bytes +
+ edev->stats.tx_mcast_bytes +
+ edev->stats.tx_bcast_bytes;
+
+ stats->tx_errors = edev->stats.tx_err_drop_pkts;
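+ /* Broadcast receives are folded into the multicast counter */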
+ stats->multicast = edev->stats.rx_mcast_pkts +
+ edev->stats.rx_bcast_pkts;
+
+ stats->rx_fifo_errors = edev->stats.no_buff_discards;
+
+ stats->collisions = edev->stats.tx_total_collisions;
+ stats->rx_crc_errors = edev->stats.rx_crc_errors;
+ stats->rx_frame_errors = edev->stats.rx_align_errors;
+
+ return stats;
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_get_stats64 = qede_get_stats64,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+ struct pci_dev *pdev,
+ struct qed_dev_eth_info *info,
+ u32 dp_module,
+ u8 dp_level)
+{
+ struct net_device *ndev;
+ struct qede_dev *edev;
+
+ ndev = alloc_etherdev_mqs(sizeof(*edev),
+ info->num_queues,
+ info->num_queues);
+ if (!ndev) {
+ pr_err("etherdev allocation failed\n");
+ return NULL;
+ }
+
+ edev = netdev_priv(ndev);
+ edev->ndev = ndev;
+ edev->cdev = cdev;
+ edev->pdev = pdev;
+ edev->dp_module = dp_module;
+ edev->dp_level = dp_level;
+ edev->ops = qed_ops;
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+ DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, info, sizeof(*info));
+
+ edev->num_tc = edev->dev_info.num_tc;
+
+ return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+ struct net_device *ndev = edev->ndev;
+ struct pci_dev *pdev = edev->pdev;
+ u32 hw_features;
+
+ pci_set_drvdata(pdev, ndev);
+
+ ndev->mem_start = edev->dev_info.common.pci_mem_start;
+ ndev->base_addr = ndev->mem_start;
+ ndev->mem_end = edev->dev_info.common.pci_mem_end;
+ ndev->irq = edev->dev_info.common.pci_irq;
+
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ ndev->netdev_ops = &qede_netdev_ops;
+
+ qede_set_ethtool_ops(ndev);
+
+ /* user-changeable features */
+ hw_features = NETIF_F_GRO | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HIGHDMA;
+ ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ ndev->hw_features = hw_features;
+
+ /* Set network device HW mac */
+ ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+}
+
+/* This function converts a 32b param into two params of level and module.
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking the specific flow at a low level.
+ *
+ * Notice that the level should be that of the lowest required logs.
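+ *
+ * Example (illustrative): debug=0x5 selects VERBOSE prints for modules 0
+ * and 2, while debug=0x40000000 selects INFO prints for all modules.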
+ */
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+ *p_dp_level = QED_LEVEL_NOTICE;
+ *p_dp_module = 0;
+
+ if (debug & QED_LOG_VERBOSE_MASK) {
+ *p_dp_level = QED_LEVEL_VERBOSE;
+ *p_dp_module = (debug & 0x3FFFFFFF);
+ } else if (debug & QED_LOG_INFO_MASK) {
+ *p_dp_level = QED_LEVEL_INFO;
+ } else if (debug & QED_LOG_NOTICE_MASK) {
+ *p_dp_level = QED_LEVEL_NOTICE;
+ }
+}
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+ if (edev->fp_array) {
+ struct qede_fastpath *fp;
+ int i;
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ kfree(fp->sb_info);
+ kfree(fp->rxq);
+ kfree(fp->txqs);
+ }
+ kfree(edev->fp_array);
+ }
+ edev->num_rss = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+ struct qede_fastpath *fp;
+ int i;
+
+ edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+ sizeof(*edev->fp_array), GFP_KERNEL);
+ if (!edev->fp_array) {
+ DP_NOTICE(edev, "fp array allocation failed\n");
+ goto err;
+ }
+
+ for_each_rss(i) {
+ fp = &edev->fp_array[i];
+
+ fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
+ if (!fp->sb_info) {
+ DP_NOTICE(edev, "sb info struct allocation failed\n");
+ goto err;
+ }
+
+ fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq) {
+ DP_NOTICE(edev, "RXQ struct allocation failed\n");
+ goto err;
+ }
+
+ fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
+ if (!fp->txqs) {
+ DP_NOTICE(edev, "TXQ array allocation failed\n");
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ qede_free_fp_array(edev);
+ return -ENOMEM;
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ sp_task.work);
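+ /* qede_lock serializes rx-mode configuration against load/unload */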
+ mutex_lock(&edev->qede_lock);
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ qede_config_rx_mode(edev->ndev);
+ }
+
+ mutex_unlock(&edev->qede_lock);
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+ struct qed_pf_params pf_params;
+
+ /* 16 rx + 16 tx */
+ memset(&pf_params, 0, sizeof(struct qed_pf_params));
+ pf_params.eth_pf_params.num_cons = 32;
+ qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+enum qede_probe_mode {
+ QEDE_PROBE_NORMAL,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ enum qede_probe_mode mode)
+{
+ struct qed_slowpath_params params;
+ struct qed_dev_eth_info dev_info;
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+ int rc;
+
+ if (unlikely(dp_level & QED_LEVEL_INFO))
+ pr_notice("Starting qede probe\n");
+
+ cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
+ dp_module, dp_level);
+ if (!cdev) {
+ rc = -ENODEV;
+ goto err0;
+ }
+
+ qede_update_pf_params(cdev);
+
+ /* Start the Slowpath-process */
+ memset(&params, 0, sizeof(struct qed_slowpath_params));
+ params.int_mode = QED_INT_MODE_MSIX;
+ params.drv_major = QEDE_MAJOR_VERSION;
+ params.drv_minor = QEDE_MINOR_VERSION;
+ params.drv_rev = QEDE_REVISION_VERSION;
+ params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(cdev, &params);
+ if (rc) {
+ pr_notice("Cannot start slowpath\n");
+ goto err1;
+ }
+
+ /* Learn information crucial for qede to progress */
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto err2;
+
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ qede_init_ndev(edev);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err3;
+ }
+
+ edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+
+ edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+
+ DP_INFO(edev, "Ending successfully qede probe\n");
+
+ return 0;
+
+err3:
+ free_netdev(edev->ndev);
+err2:
+ qed_ops->common->slowpath_stop(cdev);
+err1:
+ qed_ops->common->remove(cdev);
+err0:
+ return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(debug, &dp_module, &dp_level);
+
+ return __qede_probe(pdev, dp_module, dp_level,
+ QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+ QEDE_REMOVE_NORMAL,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_INFO(edev, "Starting qede_remove\n");
+
+ cancel_delayed_work_sync(&edev->sp_task);
+ unregister_netdev(ndev);
+
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(ndev);
+
+ /* Use global ops since we've freed edev */
+ qed_ops->common->slowpath_stop(cdev);
+ qed_ops->common->remove(cdev);
+
+ pr_notice("Ending successfully qede_remove\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+ int rc;
+ u16 rss_num;
+
+ /* Set up queues according to available resources */
+ rss_num = netif_get_num_default_rss_queues() *
+ edev->dev_info.common.num_hwfns;
+
+ rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+ rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+ if (rc > 0) {
+ /* Managed to request interrupts for our queues */
+ edev->num_rss = rc;
+ DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+ QEDE_RSS_CNT(edev), rss_num);
+ rc = 0;
+ }
+ return rc;
+}
+
+static void qede_free_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info)
+{
+ if (sb_info->sb_virt)
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+ sizeof(*sb_virt),
+ &sb_phys, GFP_KERNEL);
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 i;
+
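+ /* Producer/consumer are free-running u16s; the mask maps them into the ring */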
+ for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+ struct sw_rx_data *rx_buf;
+ u8 *data;
+
+ rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+ data = rx_buf->data;
+
+ dma_unmap_single(&edev->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->data = NULL;
+ kfree(data);
+ }
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ /* Free rx buffers */
+ qede_free_rx_buffers(edev, rxq);
+
+ /* Free the parallel SW ring */
+ kfree(rxq->sw_rx_ring);
+
+ /* Free the FW Rx BD ring and completion ring */
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ u16 rx_buf_size;
+ u8 *data;
+
+ rx_buf_size = rxq->rx_buf_size;
+
+ data = kmalloc(rx_buf_size, GFP_ATOMIC);
+ if (unlikely(!data)) {
+ DP_NOTICE(edev, "Failed to allocate Rx data\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(&edev->pdev->dev, data,
+ rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ kfree(data);
+ DP_NOTICE(edev, "Failed to map Rx buffer\n");
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->data = data;
+
+ dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+ rxq->sw_rx_prod++;
+
+ return 0;
+}
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ int i, rc, size, num_allocated;
+
+ rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
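+ /* Buffers must hold IP-alignment headroom, L2 overhead, MTU and FW padding */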
+ rxq->rx_buf_size = NET_IP_ALIGN +
+ ETH_OVERHEAD +
+ edev->ndev->mtu +
+ QEDE_FW_RX_ALIGN_END;
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+ rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!rxq->sw_rx_ring) {
+ DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ /* Allocate FW Rx ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_NEXT_PTR,
+ NUM_RX_BDS_MAX,
+ sizeof(struct eth_rx_bd),
+ &rxq->rx_bd_ring);
+
+ if (rc)
+ goto err;
+
+ /* Allocate FW completion ring */
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ NUM_RX_BDS_MAX,
+ sizeof(union eth_rx_cqe),
+ &rxq->rx_comp_ring);
+ if (rc)
+ goto err;
+
+ /* Allocate buffers for the Rx ring */
+ for (i = 0; i < rxq->num_rx_buffers; i++) {
+ rc = qede_alloc_rx_buffer(edev, rxq);
+ if (rc)
+ break;
+ }
+ num_allocated = i;
+ if (!num_allocated) {
+ DP_ERR(edev, "Rx buffers allocation failed\n");
+ goto err;
+ } else if (num_allocated < rxq->num_rx_buffers) {
+ DP_NOTICE(edev,
+ "Allocated fewer buffers than desired (%d allocated)\n",
+ num_allocated);
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_rxq(edev, rxq);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ /* Free the parallel SW ring */
+ kfree(txq->sw_tx_ring);
+
+ /* Free the real Tx ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ int size, rc;
+ union eth_tx_bd_types *p_virt;
+
+ txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+ /* Allocate the parallel driver ring for Tx buffers */
+ size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring) {
+ DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
+ goto err;
+ }
+
+ rc = edev->ops->common->chain_alloc(edev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ NUM_TX_BDS_MAX,
+ sizeof(*p_virt),
+ &txq->tx_pbl);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ qede_free_mem_txq(edev, txq);
+ return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int tc;
+
+ qede_free_mem_sb(edev, fp->sb_info);
+
+ qede_free_mem_rxq(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++)
+ qede_free_mem_txq(edev, &fp->txqs[tc]);
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * which contains a status block, one Rx queue and multiple per-TC Tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev,
+ struct qede_fastpath *fp)
+{
+ int rc, tc;
+
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+ if (rc)
+ goto err;
+
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ if (rc)
+ goto err;
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+ if (rc)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ qede_free_mem_fp(edev, fp);
+ return -ENOMEM;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ qede_free_mem_fp(edev, fp);
+ }
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+ int rc = 0, rss_id;
+
+ for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[rss_id];
+
+ rc = qede_alloc_mem_fp(edev, fp);
+ if (rc)
+ break;
+ }
+
+ if (rss_id != QEDE_RSS_CNT(edev)) {
+ /* Failed allocating memory for all the queues */
+ if (!rss_id) {
+ DP_ERR(edev,
+ "Failed to allocate memory for the leading queue\n");
+ return -ENOMEM;
+ }
+ DP_NOTICE(edev,
+ "Failed to allocate memory for all RSS queues; Desired: %d queues, allocated: %d queues\n",
+ QEDE_RSS_CNT(edev), rss_id);
+ /* Continue with however many queues were successfully allocated */
+ edev->num_rss = rss_id;
+ }
+
+ return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+ int rss_id, txq_index, tc;
+ struct qede_fastpath *fp;
+
+ for_each_rss(rss_id) {
+ fp = &edev->fp_array[rss_id];
+
+ fp->edev = edev;
+ fp->rss_id = rss_id;
+
+ memset((void *)&fp->napi, 0, sizeof(fp->napi));
+
+ memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+
+ memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+ fp->rxq->rxq_id = rss_id;
+
+ memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
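+ /* Tx queues are laid out TC-major: index = tc * QEDE_RSS_CNT(edev) + rss_id */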
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
+ fp->txqs[tc].index = txq_index;
+ }
+
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ edev->ndev->name, rss_id);
+ }
+}
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+
+ netif_napi_del(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_rss(i) {
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
+ qede_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+ int i;
+
+ for (i = 0; i < edev->int_info.used_cnt; i++) {
+ if (edev->int_info.msix_cnt) {
+ synchronize_irq(edev->int_info.msix[i].vector);
+ free_irq(edev->int_info.msix[i].vector,
+ &edev->fp_array[i]);
+ } else {
+ edev->ops->common->simd_handler_clean(edev->cdev, i);
+ }
+ }
+
+ edev->int_info.used_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+ int i, rc;
+
+ /* Sanity-check that we have an interrupt for each prepared RSS queue */
+ if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+ DP_ERR(edev,
+ "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+ QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+ rc = request_irq(edev->int_info.msix[i].vector,
+ qede_msix_fp_int, 0, edev->fp_array[i].name,
+ &edev->fp_array[i]);
+ if (rc) {
+ DP_ERR(edev, "Request fp %d irq failed\n", i);
+ qede_sync_free_irqs(edev);
+ return rc;
+ }
+ DP_VERBOSE(edev, NETIF_MSG_INTR,
+ "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+ edev->fp_array[i].name, i,
+ &edev->fp_array[i]);
+ edev->int_info.used_cnt++;
+ }
+
+ return 0;
+}
+
+static void qede_simd_fp_handler(void *cookie)
+{
+ struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+ napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+ int i, rc = 0;
+
+ /* Learn Interrupt configuration */
+ rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+ if (rc)
+ return rc;
+
+ if (edev->int_info.msix_cnt) {
+ rc = qede_req_msix_irqs(edev);
+ if (rc)
+ return rc;
+ edev->ndev->irq = edev->int_info.msix[0].vector;
+ } else {
+ const struct qed_common_ops *ops;
+
+ /* qed should learn the RSS ids and callbacks */
+ ops = edev->ops->common;
+ for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+ ops->simd_handler_config(edev->cdev,
+ &edev->fp_array[i], i,
+ qede_simd_fp_handler);
+ edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+ }
+ return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq,
+ bool allow_drain)
+{
+ int rc, cnt = 1000;
+
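+ /* Poll for up to roughly 1-2 seconds (1000 iterations of 1-2ms) */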
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ if (!cnt) {
+ if (allow_drain) {
+ DP_NOTICE(edev,
+ "Tx queue[%d] is stuck, requesting MCP to drain\n",
+ txq->index);
+ rc = edev->ops->common->drain(edev->cdev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(edev, txq, false);
+ }
+ DP_NOTICE(edev,
+ "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+ txq->index, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -ENODEV;
+ }
+ cnt--;
+ usleep_range(1000, 2000);
+ barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qed_dev *cdev = edev->cdev;
+ int rc, tc, i;
+
+ /* Disable the vport */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 0;
+ vport_update_params.update_rss_flg = 0;
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return rc;
+ }
+
+ /* Flush Tx queues. If needed, request drain from MCP */
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+
+ rc = qede_drain_txq(edev, txq, true);
+ if (rc)
+ return rc;
+ }
+ }
+
+ /* Stop all queues in reverse order */
+ for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+ struct qed_stop_rxq_params rx_params;
+
+ /* Stop the Tx Queue(s) */
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+
+ tx_params.rss_id = i;
+ tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
+ rc = edev->ops->q_tx_stop(cdev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
+ }
+
+ /* Stop the Rx Queue */
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = i;
+
+ rc = edev->ops->q_rx_stop(cdev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
+ }
+
+ /* Stop the vport */
+ rc = edev->ops->vport_stop(cdev, 0);
+ if (rc)
+ DP_ERR(edev, "Failed to stop VPORT\n");
+
+ return rc;
+}
+
+static int qede_start_queues(struct qede_dev *edev)
+{
+ int rc, tc, i;
+ int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+ struct qed_dev *cdev = edev->cdev;
+ struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
+ struct qed_update_vport_params vport_update_params;
+ struct qed_queue_start_common_params q_params;
+
+ if (!edev->num_rss) {
+ DP_ERR(edev,
+ "Cannot set the vport active since there are no Rx queues\n");
+ return -EINVAL;
+ }
+
+ rc = edev->ops->vport_start(cdev, vport_id,
+ edev->ndev->mtu,
+ drop_ttl0_flg,
+ vlan_removal_en);
+
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+ vport_id, edev->ndev->mtu + ETH_HLEN, vlan_removal_en);
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+ dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = i;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
+ rc = edev->ops->q_rx_start(cdev, &q_params,
+ fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ phys_table,
+ fp->rxq->rx_comp_ring.page_cnt,
+ &fp->rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+ return rc;
+ }
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+ qede_update_rx_prod(edev, fp->rxq);
+
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ struct qede_tx_queue *txq = &fp->txqs[tc];
+ int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.rss_id = i;
+ q_params.queue_id = txq_index;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = TX_PI(tc);
+
+ rc = edev->ops->q_tx_start(cdev, &q_params,
+ txq->tx_pbl.pbl.p_phys_table,
+ txq->tx_pbl.page_cnt,
+ &txq->doorbell_addr);
+ if (rc) {
+ DP_ERR(edev, "Start TXQ #%d failed %d\n",
+ txq_index, rc);
+ return rc;
+ }
+
+ txq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
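+ /* Pre-build the static portion of the Tx doorbell data for this queue */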
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ }
+ }
+
+ /* Prepare and send the vport enable */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport_id;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 1;
+
+ /* Fill struct with RSS params */
+ if (QEDE_RSS_CNT(edev) > 1) {
+ vport_update_params.update_rss_flg = 1;
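+ /* Spread the 128-entry indirection table round-robin across the queues */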
+ for (i = 0; i < 128; i++)
+ rss_params->rss_ind_table[i] =
+ ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
+ netdev_rss_key_fill(rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ } else {
+ memset(rss_params, 0, sizeof(*rss_params));
+ }
+ memcpy(&vport_update_params.rss_params, rss_params,
+ sizeof(*rss_params));
+
+ rc = edev->ops->vport_update(cdev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char *mac, int num_macs)
+{
+ struct qed_filter_params filter_cmd;
+ int i;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_MCAST;
+ filter_cmd.filter.mcast.type = opcode;
+ filter_cmd.filter.mcast.num = num_macs;
+
+ for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+ ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+enum qede_unload_mode {
+ QEDE_UNLOAD_NORMAL,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+{
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "Starting qede unload\n");
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_CLOSED;
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+ rc = qede_stop_queues(edev);
+ if (rc) {
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+
+ edev->ops->fastpath_stop(edev->cdev);
+
+ /* Release the interrupts */
+ qede_sync_free_irqs(edev);
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+
+ qede_napi_disable_remove(edev);
+
+ qede_free_mem_load(edev);
+ qede_free_fp_array(edev);
+
+out:
+ mutex_unlock(&edev->qede_lock);
+ DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+ QEDE_LOAD_NORMAL,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+{
+ struct qed_link_params link_params;
+ struct qed_link_output link_output;
+ int rc;
+
+ DP_INFO(edev, "Starting qede load\n");
+
+ rc = qede_set_num_queues(edev);
+ if (rc)
+ goto err0;
+
+ rc = qede_alloc_fp_array(edev);
+ if (rc)
+ goto err0;
+
+ qede_init_fp(edev);
+
+ rc = qede_alloc_mem_load(edev);
+ if (rc)
+ goto err1;
+ DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+ QEDE_RSS_CNT(edev), edev->num_tc);
+
+ rc = qede_set_real_num_queues(edev);
+ if (rc)
+ goto err2;
+
+ qede_napi_add_enable(edev);
+ DP_INFO(edev, "Napi added and enabled\n");
+
+ rc = qede_setup_irqs(edev);
+ if (rc)
+ goto err3;
+ DP_INFO(edev, "Setup IRQs succeeded\n");
+
+ rc = qede_start_queues(edev);
+ if (rc)
+ goto err4;
+ DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+ /* Record the primary MAC; Rx filters are configured via qede_config_rx_mode */
+ ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
+
+ mutex_lock(&edev->qede_lock);
+ edev->state = QEDE_STATE_OPEN;
+ mutex_unlock(&edev->qede_lock);
+
+ /* Ask for link-up using current configuration */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Query whether link is already-up */
+ memset(&link_output, 0, sizeof(link_output));
+ edev->ops->common->get_link(edev->cdev, &link_output);
+ qede_link_update(edev, &link_output);
+
+ DP_INFO(edev, "Ending successfully qede load\n");
+
+ return 0;
+
+err4:
+ qede_sync_free_irqs(edev);
+ memset(&edev->int_info, 0, sizeof(edev->int_info));
+err3:
+ qede_napi_disable_remove(edev);
+err2:
+ qede_free_mem_load(edev);
+err1:
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+ qede_free_fp_array(edev);
+ edev->num_rss = 0;
+err0:
+ return rc;
+}
+
+void qede_reload(struct qede_dev *edev,
+ void (*func)(struct qede_dev *, union qede_reload_args *),
+ union qede_reload_args *args)
+{
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+ /* Call function handler to update parameters
+ * needed for function load.
+ */
+ if (func)
+ func(edev, args);
+
+ qede_load(edev, QEDE_LOAD_NORMAL);
+
+ mutex_lock(&edev->qede_lock);
+ qede_config_rx_mode(edev->ndev);
+ mutex_unlock(&edev->qede_lock);
+}
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+
+ edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+ return qede_load(edev, QEDE_LOAD_NORMAL);
+}
+
+static int qede_close(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ qede_unload(edev, QEDE_UNLOAD_NORMAL);
+
+ return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qede_dev *edev = dev;
+
+ if (!netif_running(edev->ndev)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ return;
+ }
+
+ if (link->link_up) {
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
+ } else {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+ }
+}
+
+static int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int rc;
+
+ ASSERT_RTNL(); /* @@@TBD To be removed */
+
+ DP_INFO(edev, "Set_mac_addr called\n");
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ DP_NOTICE(edev, "The MAC address is not valid\n");
+ return -EFAULT;
+ }
+
+ ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+ if (!netif_running(ndev)) {
+ DP_NOTICE(edev, "The device is currently down\n");
+ return 0;
+ }
+
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ edev->primary_mac);
+ if (rc)
+ return rc;
+
+ /* Add MAC filter according to the new unicast HW MAC address */
+ ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+ return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+ enum qed_filter_rx_mode_type *accept_flags)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ unsigned char *mc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc = 0, mc_count;
+ size_t size;
+
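+ /* The device supports at most 64 multicast filters (checked below) */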
+ size = 64 * ETH_ALEN;
+
+ mc_macs = kzalloc(size, GFP_KERNEL);
+ if (!mc_macs) {
+ DP_NOTICE(edev,
+ "Failed to allocate memory for multicast MACs\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ temp = mc_macs;
+
+ /* Remove all previously configured MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ mc_macs, 1);
+ if (rc)
+ goto exit;
+
+ netif_addr_lock_bh(ndev);
+
+ mc_count = netdev_mc_count(ndev);
+ if (mc_count <= 64) {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Check for all multicast @@@TBD resource allocation */
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ (mc_count > 64)) {
+ if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+ *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+ } else {
+ /* Add all multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ mc_macs, mc_count);
+ }
+
+exit:
+ kfree(mc_macs);
+ return rc;
+}
+
+static void qede_set_rx_mode(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_INFO(edev, "qede_set_rx_mode called\n");
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_INFO(edev,
+ "qede_set_rx_mode called while interface is down\n");
+ } else {
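+ /* Defer to sp_task: filter configuration sleeps, this context is atomic */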
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+ }
+}
+
+/* Must be called with qede_lock held */
+static void qede_config_rx_mode(struct net_device *ndev)
+{
+ enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qed_filter_params rx_mode;
+ unsigned char *uc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc, uc_count;
+ size_t size;
+
+ netif_addr_lock_bh(ndev);
+
+ uc_count = netdev_uc_count(ndev);
+ size = uc_count * ETH_ALEN;
+
+ uc_macs = kzalloc(size, GFP_ATOMIC);
+ if (!uc_macs) {
+ DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+ netif_addr_unlock_bh(ndev);
+ return;
+ }
+
+ temp = uc_macs;
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Configure the struct for the Rx mode */
+ memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+ rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+ /* Remove all previous unicast secondary macs and multicast macs
+ * (configure / leave the primary mac)
+ */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+ edev->primary_mac);
+ if (rc)
+ goto out;
+
+ /* Check for promiscuous */
+ if ((ndev->flags & IFF_PROMISC) ||
+ (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ } else {
+ /* Add MAC filters according to the unicast secondary macs */
+ int i;
+
+ temp = uc_macs;
+ for (i = 0; i < uc_count; i++) {
+ rc = qede_set_ucast_rx_mac(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ temp);
+ if (rc)
+ goto out;
+
+ temp += ETH_ALEN;
+ }
+
+ rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+ if (rc)
+ goto out;
+ }
+
+ rx_mode.filter.accept_flags = accept_flags;
+ edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+ kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 4847713211ca..b09a6b80d107 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1736,8 +1736,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
static u32 ql_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 055f3763e577..46bbea8e023c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -24,9 +24,7 @@
#include <linux/mii.h>
#include <linux/timer.h>
#include <linux/irq.h>
-
#include <linux/vmalloc.h>
-
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
@@ -39,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 62
-#define QLCNIC_LINUX_VERSIONID "5.3.62"
+#define _QLCNIC_LINUX_SUBVERSION 63
+#define QLCNIC_LINUX_VERSIONID "5.3.63"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -538,6 +536,7 @@ struct qlcnic_hardware_context {
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
+ u8 vxlan_port_count;
u16 vxlan_port;
struct device *hwmon_dev;
u32 post_mode;
@@ -926,6 +925,7 @@ struct qlcnic_mac_vlan_list {
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP BIT_13
#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
@@ -1092,7 +1092,7 @@ struct qlcnic_filter_hash {
struct qlcnic_mailbox {
struct workqueue_struct *work_q;
struct qlcnic_adapter *adapter;
- struct qlcnic_mbx_ops *ops;
+ const struct qlcnic_mbx_ops *ops;
struct work_struct work;
struct completion completion;
struct list_head cmd_q;
@@ -2291,8 +2291,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
-#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30 0x8C30
#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
@@ -2319,7 +2320,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
(device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
(device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2335,7 +2337,8 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
return status;
}
@@ -2351,7 +2354,8 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
}
static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 840bf36b5e9d..37a731be7d39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -5,14 +5,15 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
@@ -118,6 +119,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
{QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+ {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -916,8 +918,6 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
mbx->cmd_op = type;
@@ -3513,6 +3513,31 @@ out:
qlcnic_free_mbx_args(&cmd);
}
+#define QLCNIC_83XX_ADD_PORT0 BIT_0
+#define QLCNIC_83XX_ADD_PORT1 BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+ if (err)
+ return err;
+
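+	/* Request extended dump memory for both ports, 13 MB each */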
+ cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+ cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+ cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+		dev_err(&adapter->pdev->dev,
+			"failed to issue extended iSCSI minidump capability command\n");
+
+	qlcnic_free_mbx_args(&cmd);
+
+	return err;
+}
+
int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
{
u32 major, minor, sub;
@@ -4023,7 +4048,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
work);
struct qlcnic_adapter *adapter = mbx->adapter;
- struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
@@ -4073,7 +4098,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
}
}
-static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
.enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
.dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
.decode_resp = qlcnic_83xx_decode_mbx_rsp,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 69f828eb42cf..331ae2c20f40 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/etherdevice.h>
+
#include "qlcnic_hw.h"
#define QLCNIC_83XX_BAR0_LENGTH 0x4000
@@ -626,6 +627,7 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 753ea8bad953..bf892160dd5f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1384,7 +1384,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
size_t size;
u64 addr;
- temp = kzalloc(fw->size, GFP_KERNEL);
+ temp = vzalloc(fw->size);
if (!temp) {
release_firmware(fw);
fw_info->fw = NULL;
@@ -1430,7 +1430,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
exit:
release_firmware(fw);
fw_info->fw = NULL;
- kfree(temp);
+ vfree(temp);
return ret;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 6e6f18fc5d76..a5f422f26cb4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -73,8 +73,6 @@ int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = type;
break;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 75ee9e4ced51..509b596cf1e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include "qlcnic.h"
-#include "qlcnic_hdr.h"
-
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bitops.h>
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
#define MASK(n) ((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index cbe2399c30a0..4bb33af8e2b3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -109,6 +109,7 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP 0x37
#define QLCNIC_INTRPT_INTX 1
#define QLCNIC_INTRPT_MSIX 3
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2f6cc423ab1d..d4481454b5f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -7,11 +7,6 @@
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
@@ -25,6 +20,10 @@
#include <net/vxlan.h>
#endif
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
@@ -111,8 +110,9 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
static const struct pci_device_id qlcnic_pci_tbl[] = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
- ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
@@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
/* Adapter supports only one VXLAN port. Use very first port
* for enabling offload
*/
- if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+ if (!qlcnic_encap_rx_offload(adapter))
+ return;
+ if (!ahw->vxlan_port_count) {
+ ahw->vxlan_port_count = 1;
+ ahw->vxlan_port = ntohs(port);
+ adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
return;
+ }
+ if (ahw->vxlan_port == ntohs(port))
+ ahw->vxlan_port_count++;
- ahw->vxlan_port = ntohs(port);
- adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
}
static void qlcnic_del_vxlan_port(struct net_device *netdev,
@@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+ if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
(ahw->vxlan_port != ntohs(port)))
return;
- adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+ ahw->vxlan_port_count--;
+ if (!ahw->vxlan_port_count)
+ adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
@@ -1149,6 +1157,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -2403,7 +2412,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
qlcnic_free_tx_rings(adapter);
return -ENOMEM;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
spin_lock_init(&tx_ring->tx_clean_lock);
}
@@ -2492,6 +2500,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 332bb8a3f430..cda9e604a95f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -5,13 +5,13 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <net/ip.h>
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
#include "qlcnic_83xx_hw.h"
#include "qlcnic_hw.h"
-#include <net/ip.h>
-
#define QLC_83XX_MINIDUMP_FLASH 0x520000
#define QLC_83XX_OCM_INDEX 3
#define QLC_83XX_PCI_INDEX 0
@@ -1388,27 +1388,60 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
netdev_info(adapter->netdev,
- "Dump data %d bytes captured, template header size %d bytes\n",
- fw_dump->size, fw_dump->tmpl_hdr_size);
+ "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+ fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+ fw_dump->tmpl_hdr);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
return 0;
}
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+ /* For special adapters (with 0x8830 device ID), where iSCSI firmware
+ * dump needs to be captured as part of regular firmware dump
+ * collection process, the firmware exports its capability through
+ * capability registers
+ */
+ return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+ (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
{
u32 prev_version, current_version;
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
struct pci_dev *pdev = adapter->pdev;
+ bool extended = false;
prev_version = adapter->fw_version;
current_version = qlcnic_83xx_get_fw_version(adapter);
if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
vfree(fw_dump->tmpl_hdr);
+
+ if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+ extended = !qlcnic_83xx_extend_md_capab(adapter);
+
if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+ /* Once we have minidump template with extended iSCSI dump
+ * capability, update the minidump capture mask to 0x1f as
+ * per FW requirement
+ */
+ if (extended) {
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ hdr->drv_cap_mask = 0x1f;
+ fw_dump->cap_mask = 0x1f;
+ dev_info(&pdev->dev,
+ "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+ }
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 4677b2edccca..017d8c2c8285 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -8,10 +8,11 @@
#ifndef _QLCNIC_83XX_SRIOV_H_
#define _QLCNIC_83XX_SRIOV_H_
-#include "qlcnic.h"
#include <linux/types.h>
#include <linux/pci.h>
+#include "qlcnic.h"
+
extern const u32 qlcnic_83xx_reg_tbl[];
extern const u32 qlcnic_83xx_ext_reg_tbl[];
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index e6312465fe45..7327b729ba2e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -5,10 +5,11 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
-#include <linux/types.h>
#define QLC_BC_COMMAND 0
#define QLC_BC_RESPONSE 1
@@ -728,8 +729,6 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
mbx->req.arg = NULL;
return -ENOMEM;
}
- memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
- memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = (type | (mbx->req.num << 16) |
(3 << 29));
mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index a29538b86edf..afd687e5e779 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -5,9 +5,10 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include <linux/types.h>
+
#include "qlcnic_sriov.h"
#include "qlcnic.h"
-#include <linux/types.h>
#define QLCNIC_SRIOV_VF_MAX_MAC 7
#define QLC_VF_MIN_TX_RATE 100
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 05c28f2c6df7..ccbb04503b27 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -7,10 +7,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_hw.h"
-
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
@@ -24,6 +20,9 @@
#include <linux/hwmon-sysfs.h>
#endif
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index c3c514e332b5..5dade1fd08b8 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -415,13 +415,6 @@ static void ql_get_drvinfo(struct net_device *ndev,
(qdev->fw_rev_id & 0x000000ff));
strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->testinfo_len = 0;
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
- else
- drvinfo->regdump_len = sizeof(struct ql_reg_dump);
- drvinfo->eedump_len = 0;
}
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 2f87909f5186..ddb2c6c6ec94 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -974,7 +974,6 @@ MODULE_DEVICE_TABLE(spi, qca_spi_id);
static struct spi_driver qca_spi_driver = {
.driver = {
.name = QCASPI_DRV_NAME,
- .owner = THIS_MODULE,
.of_match_table = qca_spi_of_match,
},
.id_table = qca_spi_id,
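The explicit .owner initializer is redundant because the SPI core records the owning module at registration time; the registration macro in <linux/spi/spi.h> reads roughly:

#define spi_register_driver(driver) \
	__spi_register_driver(THIS_MODULE, driver)

so every spi_driver registered through it (including via module_spi_driver()) gets its owner set automatically.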
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b3c191..deae10d7426d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
NWayAdvert = 0x66, /* MII ADVERTISE */
NWayLPAR = 0x68, /* MII LPA */
NWayExpansion = 0x6A, /* MII Expansion */
+	TxDmaOkLowDesc	= 0x82, /* Low 16 bits of a Tx descriptor address. */
Config5 = 0xD8, /* Config5 */
TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -174,7 +175,7 @@ enum {
LastFrag = (1 << 28), /* Final segment of a packet */
LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
MSSShift = 16, /* MSS value position */
- MSSMask = 0xfff, /* MSS value: 11 bits */
+ MSSMask = 0x7ff, /* MSS value: 11 bits */
TxError = (1 << 23), /* Tx error summary */
RxError = (1 << 20), /* Rx error summary */
IPCS = (1 << 18), /* Calculate IP checksum */
@@ -341,6 +342,7 @@ struct cp_private {
unsigned tx_tail;
struct cp_desc *tx_ring;
struct sk_buff *tx_skb[CP_TX_RING_SIZE];
+ u32 tx_opts[CP_TX_RING_SIZE];
unsigned rx_buf_sz;
unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp)
BUG_ON(!skb);
dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
- le32_to_cpu(txd->opts1) & 0xffff,
+ cp->tx_opts[tx_tail] & 0xffff,
PCI_DMA_TODEVICE);
if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
{
struct cp_private *cp = netdev_priv(dev);
unsigned entry;
- u32 eor, flags;
+ u32 eor, opts1;
unsigned long intr_flags;
__le32 opts2;
int mss = 0;
@@ -752,7 +754,28 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
mss = skb_shinfo(skb)->gso_size;
+ if (mss > MSSMask) {
+ WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
+ mss);
+ goto out_dma_error;
+ }
+
opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+ opts1 = DescOwn;
+ if (mss)
+ opts1 |= LargeSend | (mss << MSSShift);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+ if (ip->protocol == IPPROTO_TCP)
+ opts1 |= IPCS | TCPCS;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts1 |= IPCS | UDPCS;
+ else {
+ WARN_ONCE(1,
+ "Net bug: asked to checksum invalid Legacy IP packet\n");
+ goto out_dma_error;
+ }
+ }
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
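The rewrite computes the offload bits once, up front, so both the linear and the fragmented paths below inherit identical checksum/TSO flags instead of recomputing them per descriptor. Condensed view (not the literal driver code):

u32 opts1 = DescOwn;

if (mss)
	opts1 |= LargeSend | (mss << MSSShift);		/* hardware TSO */
else if (skb->ip_summed == CHECKSUM_PARTIAL)
	opts1 |= IPCS | (ip_hdr(skb)->protocol == IPPROTO_TCP ?
			 TCPCS : UDPCS);
/* each descriptor then ORs in its own eor | len | FirstFrag/LastFrag */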
@@ -768,31 +791,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
txd->addr = cpu_to_le64(mapping);
wmb();
- flags = eor | len | DescOwn | FirstFrag | LastFrag;
-
- if (mss)
- flags |= LargeSend | ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const struct iphdr *ip = ip_hdr(skb);
- if (ip->protocol == IPPROTO_TCP)
- flags |= IPCS | TCPCS;
- else if (ip->protocol == IPPROTO_UDP)
- flags |= IPCS | UDPCS;
- else
- WARN_ON(1); /* we need a WARN() */
- }
+ opts1 |= eor | len | FirstFrag | LastFrag;
- txd->opts1 = cpu_to_le32(flags);
+ txd->opts1 = cpu_to_le32(opts1);
wmb();
cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
+ cp->tx_opts[entry] = opts1;
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
} else {
struct cp_desc *txd;
- u32 first_len, first_eor;
+ u32 first_len, first_eor, ctrl;
dma_addr_t first_mapping;
int frag, first_entry = entry;
- const struct iphdr *ip = ip_hdr(skb);
/* We must give this initial chunk to the device last.
* Otherwise we could race with the device.
@@ -805,14 +817,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
goto out_dma_error;
cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
- u32 ctrl;
dma_addr_t mapping;
+ entry = NEXT_TX(entry);
+
len = skb_frag_size(this_frag);
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
@@ -824,19 +836,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
- ctrl = eor | len | DescOwn;
-
- if (mss)
- ctrl |= LargeSend |
- ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (ip->protocol == IPPROTO_TCP)
- ctrl |= IPCS | TCPCS;
- else if (ip->protocol == IPPROTO_UDP)
- ctrl |= IPCS | UDPCS;
- else
- BUG();
- }
+ ctrl = opts1 | eor | len;
if (frag == skb_shinfo(skb)->nr_frags - 1)
ctrl |= LastFrag;
@@ -849,8 +849,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
txd->opts1 = cpu_to_le32(ctrl);
wmb();
+ cp->tx_opts[entry] = ctrl;
cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
}
txd = &cp->tx_ring[first_entry];
@@ -858,27 +858,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
txd->addr = cpu_to_le64(first_mapping);
wmb();
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (ip->protocol == IPPROTO_TCP)
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn |
- IPCS | TCPCS);
- else if (ip->protocol == IPPROTO_UDP)
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn |
- IPCS | UDPCS);
- else
- BUG();
- } else
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn);
+ ctrl = opts1 | first_eor | first_len | FirstFrag;
+ txd->opts1 = cpu_to_le32(ctrl);
wmb();
+
+ cp->tx_opts[first_entry] = ctrl;
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+ first_entry, entry, skb->len);
}
- cp->tx_head = entry;
+ cp->tx_head = NEXT_TX(entry);
netdev_sent_queue(dev, skb->len);
- netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
- entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
@@ -1115,6 +1105,7 @@ static int cp_init_rings (struct cp_private *cp)
{
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+ memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
cp_init_rings_index(cp);
@@ -1151,7 +1142,7 @@ static void cp_clean_rings (struct cp_private *cp)
desc = cp->rx_ring + i;
dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(cp->rx_skb[i]);
+ dev_kfree_skb_any(cp->rx_skb[i]);
}
}
@@ -1164,7 +1155,7 @@ static void cp_clean_rings (struct cp_private *cp)
le32_to_cpu(desc->opts1) & 0xffff,
PCI_DMA_TODEVICE);
if (le32_to_cpu(desc->opts1) & LastFrag)
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
cp->dev->stats.tx_dropped++;
}
}
@@ -1172,6 +1163,7 @@ static void cp_clean_rings (struct cp_private *cp)
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+ memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1241,7 @@ static void cp_tx_timeout(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- int rc;
+ int rc, i;
netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1249,26 @@ static void cp_tx_timeout(struct net_device *dev)
spin_lock_irqsave(&cp->lock, flags);
+ netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+ cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+ for (i = 0; i < CP_TX_RING_SIZE; i++) {
+ netif_dbg(cp, tx_err, cp->dev,
+ "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+ i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+ cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+ le64_to_cpu(cp->tx_ring[i].addr),
+ cp->tx_skb[i]);
+ }
+
cp_stop_hw(cp);
cp_clean_rings(cp);
rc = cp_init_rings(cp);
cp_start_hw(cp);
- cp_enable_irq(cp);
+ __cp_set_rx_mode(dev);
+ cpw16_f(IntrMask, cp_norx_intr_mask);
netif_wake_queue(dev);
+ napi_schedule_irqoff(&cp->napi);
spin_unlock_irqrestore(&cp->lock, flags);
}
@@ -1853,6 +1858,15 @@ static void cp_set_d3_state (struct cp_private *cp)
pci_set_power_state (cp->pdev, PCI_D3hot);
}
+static netdev_features_t cp_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->gso_size > MSSMask)
+ features &= ~NETIF_F_TSO;
+
+ return vlan_features_check(skb, features);
+}
static const struct net_device_ops cp_netdev_ops = {
.ndo_open = cp_open,
.ndo_stop = cp_close,
@@ -1865,6 +1879,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
.ndo_change_mtu = cp_change_mtu,
+ .ndo_features_check = cp_features_check,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cp_poll_controller,
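cp_features_check() gives the driver a per-skb veto over offloads: when gso_size exceeds the 11-bit MSS field, clearing NETIF_F_TSO makes the stack segment the packet in software before it reaches cp_start_xmit(). Simplified view of how the core consults the hook (from netif_skb_features(), abridged):

netdev_features_t features = dev->features;

if (dev->netdev_ops->ndo_features_check)
	features &= dev->netdev_ops->ndo_features_check(skb, dev, features);
/* with NETIF_F_TSO cleared, the GSO layer splits the skb so each
 * segment fits this chip's 0x7ff MSS limit */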
@@ -1984,12 +1999,12 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &cp_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
- /* disabled by default until verified */
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 78bb4ceb1cdd..ef668d300800 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2388,7 +2388,6 @@ static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
- info->regdump_len = tp->regs_len;
}
static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..b4f21232019a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -637,6 +637,9 @@ enum rtl_register_content {
/* _TBICSRBit */
TBILinkOK = 0x02000000,
+ /* ResetCounterCommand */
+ CounterReset = 0x1,
+
/* DumpCounterCommand */
CounterDump = 0x8,
@@ -747,6 +750,13 @@ struct rtl8169_counters {
__le16 tx_underun;
};
+struct rtl8169_tc_offsets {
+ bool inited;
+ __le64 tx_errors;
+ __le32 tx_multi_collision;
+ __le16 tx_aborted;
+};
+
enum rtl_flag {
RTL_FLAG_TASK_ENABLED,
RTL_FLAG_TASK_SLOW_PENDING,
@@ -823,7 +833,9 @@ struct rtl8169_private {
unsigned features;
struct mii_if_info mii;
- struct rtl8169_counters counters;
+ dma_addr_t counters_phys_addr;
+ struct rtl8169_counters *counters;
+ struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
u32 opts1_mask;
@@ -2183,65 +2195,121 @@ DECLARE_RTL_COND(rtl_counters_cond)
{
void __iomem *ioaddr = tp->mmio_addr;
- return RTL_R32(CounterAddrLow) & CounterDump;
+ return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
}
-static void rtl8169_update_counters(struct net_device *dev)
+static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- struct device *d = &tp->pci_dev->dev;
- struct rtl8169_counters *counters;
- dma_addr_t paddr;
+ dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
+ bool ret;
+
+ RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ cmd = (u64)paddr & DMA_BIT_MASK(32);
+ RTL_W32(CounterAddrLow, cmd);
+ RTL_W32(CounterAddrLow, cmd | counter_cmd);
+
+ ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+
+ RTL_W32(CounterAddrLow, 0);
+ RTL_W32(CounterAddrHigh, 0);
+
+ return ret;
+}
+
+static bool rtl8169_reset_counters(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ /*
+ * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
+ * tally counters.
+ */
+ if (tp->mac_version < RTL_GIGA_MAC_VER_19)
+ return true;
+
+ return rtl8169_do_counters(dev, CounterReset);
+}
+
+static bool rtl8169_update_counters(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
/*
* Some chips are unable to dump tally counters when the receiver
* is disabled.
*/
if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
- return;
+ return true;
- counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
- if (!counters)
- return;
+ return rtl8169_do_counters(dev, CounterDump);
+}
- RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
- cmd = (u64)paddr & DMA_BIT_MASK(32);
- RTL_W32(CounterAddrLow, cmd);
- RTL_W32(CounterAddrLow, cmd | CounterDump);
+static bool rtl8169_init_counter_offsets(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_counters *counters = tp->counters;
+ bool ret = false;
+
+ /*
+ * rtl8169_init_counter_offsets is called from rtl_open. On chip
+ * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
+ * reset by a power cycle, while the counter values collected by the
+ * driver are reset at every driver unload/load cycle.
+ *
+ * To make sure the HW values returned by @get_stats64 match the SW
+ * values, we collect the initial values at first open(*) and use them
+ * as offsets to normalize the values returned by @get_stats64.
+ *
+ * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
+ * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
+ * set at open time by rtl_hw_start.
+ */
- if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
- memcpy(&tp->counters, counters, sizeof(*counters));
+ if (tp->tc_offset.inited)
+ return true;
- RTL_W32(CounterAddrLow, 0);
- RTL_W32(CounterAddrHigh, 0);
+	/* If both reset and update fail, propagate to caller. */
+ if (rtl8169_reset_counters(dev))
+ ret = true;
+
+ if (rtl8169_update_counters(dev))
+ ret = true;
- dma_free_coherent(d, sizeof(*counters), counters, paddr);
+ tp->tc_offset.tx_errors = counters->tx_errors;
+ tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
+ tp->tc_offset.tx_aborted = counters->tx_aborted;
+ tp->tc_offset.inited = true;
+
+ return ret;
}
static void rtl8169_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL();
rtl8169_update_counters(dev);
- data[0] = le64_to_cpu(tp->counters.tx_packets);
- data[1] = le64_to_cpu(tp->counters.rx_packets);
- data[2] = le64_to_cpu(tp->counters.tx_errors);
- data[3] = le32_to_cpu(tp->counters.rx_errors);
- data[4] = le16_to_cpu(tp->counters.rx_missed);
- data[5] = le16_to_cpu(tp->counters.align_errors);
- data[6] = le32_to_cpu(tp->counters.tx_one_collision);
- data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
- data[8] = le64_to_cpu(tp->counters.rx_unicast);
- data[9] = le64_to_cpu(tp->counters.rx_broadcast);
- data[10] = le32_to_cpu(tp->counters.rx_multicast);
- data[11] = le16_to_cpu(tp->counters.tx_aborted);
- data[12] = le16_to_cpu(tp->counters.tx_underun);
+ data[0] = le64_to_cpu(counters->tx_packets);
+ data[1] = le64_to_cpu(counters->rx_packets);
+ data[2] = le64_to_cpu(counters->tx_errors);
+ data[3] = le32_to_cpu(counters->rx_errors);
+ data[4] = le16_to_cpu(counters->rx_missed);
+ data[5] = le16_to_cpu(counters->align_errors);
+ data[6] = le32_to_cpu(counters->tx_one_collision);
+ data[7] = le32_to_cpu(counters->tx_multi_collision);
+ data[8] = le64_to_cpu(counters->rx_unicast);
+ data[9] = le64_to_cpu(counters->rx_broadcast);
+ data[10] = le32_to_cpu(counters->rx_multicast);
+ data[11] = le16_to_cpu(counters->tx_aborted);
+ data[12] = le16_to_cpu(counters->tx_underun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
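The tally block now lives in a single long-lived coherent buffer instead of one allocated per dump, and tc_offset snapshots the first dump so later reads can be reported as deltas. Lifetime sketch (matching the probe/remove hunks further down):

/* probe: one buffer the chip DMAs the tally block into */
tp->counters = dma_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
				  &tp->counters_phys_addr, GFP_KERNEL);

/* remove: freed exactly once, after unregister_netdev() */
dma_free_coherent(&pdev->dev, sizeof(*tp->counters),
		  tp->counters, tp->counters_phys_addr);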
@@ -4875,10 +4943,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
@@ -6011,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- u16 rg_saw_cnt;
+ int rg_saw_cnt;
u32 data;
static const struct ephy_info e_info_8168h_1[] = {
{ 0x1e, 0x0800, 0x0001 },
@@ -7365,6 +7435,9 @@ process_pkt:
tp->rx_stats.packets++;
tp->rx_stats.bytes += pkt_size;
u64_stats_update_end(&tp->rx_stats.syncp);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
}
release_descriptor:
desc->opts2 = 0;
@@ -7629,6 +7702,9 @@ static int rtl_open(struct net_device *dev)
rtl_hw_start(dev);
+ if (!rtl8169_init_counter_offsets(dev))
+ netif_warn(tp, hw, dev, "counter reset/update failed\n");
+
netif_start_queue(dev);
rtl_unlock_work(tp);
@@ -7661,6 +7737,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct rtl8169_counters *counters = tp->counters;
unsigned int start;
if (netif_running(dev))
@@ -7672,7 +7749,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_bytes = tp->rx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
-
do {
start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
stats->tx_packets = tp->tx_stats.packets;
@@ -7686,6 +7762,24 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_crc_errors = dev->stats.rx_crc_errors;
stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
stats->rx_missed_errors = dev->stats.rx_missed_errors;
+ stats->multicast = dev->stats.multicast;
+
+ /*
+	 * Fetch the additional counter values missing from the stats
+	 * collected by the driver, using the HW tally counters.
+ */
+ rtl8169_update_counters(dev);
+
+ /*
+	 * Subtract the values fetched during initialization.
+	 * See rtl8169_init_counter_offsets for a description of why we do that.
+ */
+ stats->tx_errors = le64_to_cpu(counters->tx_errors) -
+ le64_to_cpu(tp->tc_offset.tx_errors);
+ stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
+ le32_to_cpu(tp->tc_offset.tx_multi_collision);
+ stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
+ le16_to_cpu(tp->tc_offset.tx_aborted);
return stats;
}
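The per-ring packet/byte counters above are read under the u64_stats seqcount so 64-bit values stay torn-free on 32-bit SMP. The canonical reader loop, retrying if a writer raced the read:

unsigned int start;

do {
	start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
	stats->rx_packets = tp->rx_stats.packets;
	stats->rx_bytes   = tp->rx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));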
@@ -7886,6 +7980,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
+ dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
+ tp->counters, tp->counters_phys_addr);
+
rtl_release_firmware(tp);
if (pci_dev_run_wake(pdev))
@@ -8311,9 +8408,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
+	tp->counters = dma_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
+ &tp->counters_phys_addr, GFP_KERNEL);
+ if (!tp->counters) {
+ rc = -ENOMEM;
+ goto err_out_msi_4;
+ }
+
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_msi_4;
+ goto err_out_cnt_5;
pci_set_drvdata(pdev, dev);
@@ -8347,6 +8451,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
+err_out_cnt_5:
+ dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
+ tp->counters_phys_addr);
err_out_msi_4:
netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 8aa50ac4e2d6..0623fff932e4 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -658,6 +658,8 @@ struct ravb_desc {
__le32 dptr; /* Descriptor pointer */
};
+#define DPTR_ALIGN 4 /* Required descriptor pointer alignment */
+
enum DIE_DT {
/* Frame data */
DT_FMID = 0x40,
@@ -739,6 +741,7 @@ enum RAVB_QUEUE {
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
+#define NUM_TX_DESC 2 /* TX descriptors per packet */
struct ravb_tstamp_skb {
struct list_head list;
@@ -763,6 +766,11 @@ struct ravb_ptp {
struct ravb_ptp_perout perout[N_PER_OUT];
};
+enum ravb_chip_id {
+ RCAR_GEN2,
+ RCAR_GEN3,
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -777,9 +785,9 @@ struct ravb_private {
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+ void *tx_align[NUM_TX_QUEUE];
struct sk_buff **rx_skb[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
- void **tx_buffers[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
struct net_device_stats stats[NUM_RX_QUEUE];
@@ -803,6 +811,8 @@ struct ravb_private {
int msg_enable;
int speed;
int duplex;
+ int emac_irq;
+ enum ravb_chip_id chip_id;
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 78849dd4ef8e..aa7b2083cb53 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -195,25 +195,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
/* Free aligned TX buffers */
- if (priv->tx_buffers[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- kfree(priv->tx_buffers[q][i]);
- }
- kfree(priv->tx_buffers[q]);
- priv->tx_buffers[q] = NULL;
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
- dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
priv->rx_desc_dma[q]);
priv->rx_ring[q] = NULL;
}
if (priv->tx_ring[q]) {
ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] + 1);
- dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
@@ -223,11 +219,12 @@ static void ravb_ring_free(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc = NULL;
- struct ravb_tx_desc *tx_desc = NULL;
- struct ravb_desc *desc = NULL;
+ struct ravb_ex_rx_desc *rx_desc;
+ struct ravb_tx_desc *tx_desc;
+ struct ravb_desc *desc;
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+ int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+ NUM_TX_DESC;
dma_addr_t dma_addr;
int i;
@@ -243,13 +240,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
rx_desc = &priv->rx_ring[q][i];
/* The size of the buffer should be on 16-byte boundary. */
rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
- dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
+ dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
ALIGN(PKT_BUF_SZ, 16),
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
- if (dma_mapping_error(&ndev->dev, dma_addr))
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
rx_desc->ds_cc = cpu_to_le16(0);
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
@@ -260,11 +257,12 @@ static void ravb_ring_format(struct net_device *ndev, int q)
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
- for (i = 0; i < priv->num_tx_ring[q]; i++) {
- tx_desc = &priv->tx_ring[q][i];
+ for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+ i++, tx_desc++) {
+ tx_desc->die_dt = DT_EEMPTY;
+ tx_desc++;
tx_desc->die_dt = DT_EEMPTY;
}
- tx_desc = &priv->tx_ring[q][i];
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -285,7 +283,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
struct ravb_private *priv = netdev_priv(ndev);
struct sk_buff *skb;
int ring_size;
- void *buffer;
int i;
/* Allocate RX and TX skb rings */
@@ -305,22 +302,14 @@ static int ravb_ring_init(struct net_device *ndev, int q)
}
/* Allocate rings for the aligned buffers */
- priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
- sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
- if (!priv->tx_buffers[q])
+ priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+ DPTR_ALIGN - 1, GFP_KERNEL);
+ if (!priv->tx_align[q])
goto error;
- for (i = 0; i < priv->num_tx_ring[q]; i++) {
- buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
- if (!buffer)
- goto error;
- /* Aligned TX buffer */
- priv->tx_buffers[q][i] = buffer;
- }
-
/* Allocate all RX descriptors. */
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
- priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->rx_desc_dma[q],
GFP_KERNEL);
if (!priv->rx_ring[q])
@@ -329,8 +318,9 @@ static int ravb_ring_init(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
/* Allocate all TX descriptors. */
- ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
- priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->tx_desc_dma[q],
GFP_KERNEL);
if (!priv->tx_ring[q])
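Replacing the NULL device argument with ndev->dev.parent matters because the DMA API takes its mask and dma_ops from the device it is handed; a NULL device only happens to work on some architectures. The pattern used throughout this patch:

/* always pass the device that actually performs the DMA; for a
 * platform net_device that is its parent platform device */
priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
				      &priv->rx_desc_dma[q], GFP_KERNEL);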
@@ -439,11 +429,12 @@ static int ravb_tx_free(struct net_device *ndev, int q)
struct net_device_stats *stats = &priv->stats[q];
struct ravb_tx_desc *desc;
int free_num = 0;
- int entry = 0;
+ int entry;
u32 size;
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
desc = &priv->tx_ring[q][entry];
if (desc->die_dt != DT_FEMPTY)
break;
@@ -451,14 +442,18 @@ static int ravb_tx_free(struct net_device *ndev, int q)
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
- if (priv->tx_skb[q][entry]) {
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ stats->tx_packets++;
+ }
free_num++;
}
- stats->tx_packets++;
stats->tx_bytes += size;
desc->die_dt = DT_EEMPTY;
}
@@ -512,8 +507,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
- u16 pkt_len = 0;
u8 desc_status;
+ u16 pkt_len;
int limit;
boguscnt = min(boguscnt, *quota);
@@ -551,7 +546,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
- dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
ALIGN(PKT_BUF_SZ, 16),
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
@@ -591,14 +586,14 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
le16_to_cpu(desc->ds_cc),
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
* which should prevent DMA from happening...
*/
- if (dma_mapping_error(&ndev->dev, dma_addr))
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
desc->ds_cc = cpu_to_le16(0);
desc->dptr = cpu_to_le32(dma_addr);
priv->rx_skb[q][entry] = skb;
@@ -894,6 +889,22 @@ static int ravb_phy_init(struct net_device *ndev)
return -ENOENT;
}
+	/* This driver only supports 10/100Mbit speeds on Gen3
+ * at this time.
+ */
+ if (priv->chip_id == RCAR_GEN3) {
+ int err;
+
+ err = phy_set_max_speed(phydev, SPEED_100);
+ if (err) {
+ netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
+ phy_disconnect(phydev);
+ return err;
+ }
+
+ netdev_info(ndev, "limited PHY to 100Mbit/s\n");
+ }
+
netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
phydev->addr, phydev->irq, phydev->drv->name);
@@ -1202,6 +1213,15 @@ static int ravb_open(struct net_device *ndev)
goto out_napi_off;
}
+ if (priv->chip_id == RCAR_GEN3) {
+ error = request_irq(priv->emac_irq, ravb_interrupt,
+ IRQF_SHARED, ndev->name, ndev);
+ if (error) {
+ netdev_err(ndev, "cannot request IRQ\n");
+ goto out_free_irq;
+ }
+ }
+
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
@@ -1225,6 +1245,7 @@ out_ptp_stop:
ravb_ptp_stop(ndev);
out_free_irq:
free_irq(ndev->irq, ndev);
+ free_irq(priv->emac_irq, ndev);
out_napi_off:
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
@@ -1277,44 +1298,60 @@ static void ravb_tx_timeout_work(struct work_struct *work)
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_tstamp_skb *ts_skb = NULL;
u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
unsigned long flags;
u32 dma_addr;
void *buffer;
u32 entry;
+ u32 len;
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+ if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+ NUM_TX_DESC) {
netif_err(priv, tx_queued, ndev,
"still transmitting with the full ring!\n");
netif_stop_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_BUSY;
}
- entry = priv->cur_tx[q] % priv->num_tx_ring[q];
- priv->tx_skb[q][entry] = skb;
+ entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+ priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
if (skb_put_padto(skb, ETH_ZLEN))
goto drop;
- buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
- memcpy(buffer, skb->data, skb->len);
- desc = &priv->tx_ring[q][entry];
- desc->ds_tagl = cpu_to_le16(skb->len);
- dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr))
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / NUM_TX_DESC * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto drop;
+
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(len);
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ buffer = skb->data + len;
+ len = skb->len - len;
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto unmap;
+
+ desc++;
+ desc->ds_tagl = cpu_to_le16(len);
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
if (q == RAVB_NC) {
ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
if (!ts_skb) {
- dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr, len,
DMA_TO_DEVICE);
- goto drop;
+ goto unmap;
}
ts_skb->skb = skb;
ts_skb->tag = priv->ts_skb_tag++;
@@ -1330,13 +1367,15 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Descriptor type must be set after all the above writes */
dma_wmb();
- desc->die_dt = DT_FSINGLE;
+ desc->die_dt = DT_FEND;
+ desc--;
+ desc->die_dt = DT_FSTART;
ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
- priv->cur_tx[q]++;
- if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
- !ravb_tx_free(ndev, q))
+ priv->cur_tx[q] += NUM_TX_DESC;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
netif_stop_subqueue(ndev, q);
exit:
@@ -1344,9 +1383,12 @@ exit:
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
+unmap:
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
dev_kfree_skb_any(skb);
- priv->tx_skb[q][entry] = NULL;
+ priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
goto exit;
}
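Each packet now consumes NUM_TX_DESC (= 2) descriptors: a small bounce copy that brings the buffer start up to DPTR_ALIGN, then the remainder of the skb mapped in place, framed as DT_FSTART/DT_FEND instead of DT_FSINGLE. The split arithmetic, condensed:

/* head: 0..DPTR_ALIGN-1 bytes copied into the per-ring bounce area
 * so the descriptor pointer is 4-byte aligned; rest is mapped as-is */
u32 head = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
u32 rest = skb->len - head;
/* desc[0]: bounce buffer, length head, DT_FSTART
 * desc[1]: skb->data + head, length rest, DT_FEND */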
@@ -1609,10 +1651,20 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct of_device_id ravb_match_table[] = {
+ { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
struct ravb_private *priv;
+ enum ravb_chip_id chip_id;
struct net_device *ndev;
int error, irq, q;
struct resource *res;
@@ -1641,9 +1693,16 @@ static int ravb_probe(struct platform_device *pdev)
/* The Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
ndev->dma = -1;
- irq = platform_get_irq(pdev, 0);
+
+ match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
+ chip_id = (enum ravb_chip_id)match->data;
+
+ if (chip_id == RCAR_GEN3)
+ irq = platform_get_irq_byname(pdev, "ch22");
+ else
+ irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- error = -ENODEV;
+ error = irq;
goto out_release;
}
ndev->irq = irq;
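The OF match table now carries the SoC generation in .data, and probe recovers it with of_match_device() to pick the interrupt scheme (Gen3 exposes named per-channel IRQs). The retrieval pattern, as used above:

const struct of_device_id *match =
	of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
enum ravb_chip_id chip_id = (enum ravb_chip_id)match->data;

if (chip_id == RCAR_GEN3)
	irq = platform_get_irq_byname(pdev, "ch22");	/* named resource */
else
	irq = platform_get_irq(pdev, 0);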
@@ -1672,6 +1731,17 @@ static int ravb_probe(struct platform_device *pdev)
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
+ if (chip_id == RCAR_GEN3) {
+ irq = platform_get_irq_byname(pdev, "ch24");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->emac_irq = irq;
+ }
+
+ priv->chip_id = chip_id;
+
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -1692,10 +1762,10 @@ static int ravb_probe(struct platform_device *pdev)
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
- priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+ priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
&priv->desc_bat_dma, GFP_KERNEL);
if (!priv->desc_bat) {
- dev_err(&ndev->dev,
+ dev_err(&pdev->dev,
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
@@ -1722,7 +1792,7 @@ static int ravb_probe(struct platform_device *pdev)
/* MDIO bus init */
error = ravb_mdio_init(priv);
if (error) {
- dev_err(&ndev->dev, "failed to initialize MDIO\n");
+ dev_err(&pdev->dev, "failed to initialize MDIO\n");
goto out_dma_free;
}
@@ -1747,7 +1817,7 @@ out_napi_del:
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
out_dma_free:
- dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
out_release:
if (ndev)
@@ -1763,7 +1833,7 @@ static int ravb_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
- dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
@@ -1802,13 +1872,6 @@ static const struct dev_pm_ops ravb_dev_pm_ops = {
#define RAVB_PM_OPS NULL
#endif
-static const struct of_device_id ravb_match_table[] = {
- { .compatible = "renesas,etheravb-r8a7790" },
- { .compatible = "renesas,etheravb-r8a7794" },
- { }
-};
-MODULE_DEVICE_TABLE(of, ravb_match_table);
-
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
.remove = ravb_remove,
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7fb244f565b2..6150a235b72c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1127,7 +1127,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
struct sh_eth_txdesc *txdesc = NULL;
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
- int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
mdp->cur_rx = 0;
@@ -1148,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
- /* The size of the buffer is a multiple of 16 bytes. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ /* The size of the buffer is a multiple of 32 bytes. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
dma_addr = dma_map_single(&ndev->dev, skb->data,
rxdesc->buffer_length,
DMA_FROM_DEVICE);
@@ -1173,7 +1173,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);
@@ -1212,15 +1212,15 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
- sizeof(*mdp->rx_skbuff), GFP_KERNEL);
+ mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
+ GFP_KERNEL);
if (!mdp->rx_skbuff) {
ret = -ENOMEM;
return ret;
}
- mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
- sizeof(*mdp->tx_skbuff), GFP_KERNEL);
+ mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
+ GFP_KERNEL);
if (!mdp->tx_skbuff) {
ret = -ENOMEM;
goto skb_ring_free;
@@ -1232,7 +1232,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
GFP_KERNEL);
if (!mdp->rx_ring) {
ret = -ENOMEM;
- goto desc_ring_free;
+ goto skb_ring_free;
}
mdp->dirty_rx = 0;
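Two small correctness fixes in this hunk: kcalloc() zeroes the skb pointer arrays, so cleanup paths that walk them never see uninitialized pointers when ring setup aborts early, and a failed rx_ring allocation now unwinds to skb_ring_free rather than to a label that would free a descriptor ring that was never allocated. The unwind rule in miniature (illustrative):

rx_ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
if (!rx_ring)
	goto skb_ring_free;	/* skb arrays exist, descriptor ring doesn't */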
@@ -1416,7 +1416,7 @@ static int sh_eth_txfree(struct net_device *ndev)
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
/* TACT bit must be checked before all the following reads */
- rmb();
+ dma_rmb();
netif_info(mdp, tx_done, ndev,
"tx entry %d status 0x%08x\n",
entry, edmac_to_cpu(mdp, txdesc->status));
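rmb()/wmb() order all reads/writes including MMIO; dma_rmb()/dma_wmb() only order CPU accesses against coherent-memory DMA, which is all a descriptor ring needs and is notably cheaper on ARM. The handshake pattern the conversions follow:

/* reader (completion) side */
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
	break;			/* still owned by the hardware */
dma_rmb();			/* ownership bit before the other fields */

/* writer (submission) side */
txdesc->buffer_length = skb->len;
dma_wmb();			/* publish fields before flipping ownership */
txdesc->status |= cpu_to_edmac(mdp, TD_TACT);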
@@ -1450,7 +1450,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
- int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
boguscnt = min(boguscnt, *quota);
@@ -1458,7 +1458,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
/* RACT bit must be checked before all the following reads */
- rmb();
+ dma_rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
@@ -1506,7 +1506,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
dma_unmap_single(&ndev->dev, rxdesc->addr,
- ALIGN(mdp->rx_buf_sz, 16),
+ ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1524,8 +1524,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
- /* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ /* The size of the buffer is 32 byte boundary. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
@@ -1544,10 +1544,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
skb_checksum_none_assert(skb);
rxdesc->addr = dma_addr;
}
- wmb(); /* RACT bit must be set after all the above writes */
+ dma_wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
- cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE);
else
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP);
@@ -2403,7 +2403,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
txdesc->buffer_length = skb->len;
- wmb(); /* TACT bit must be set after all the above writes */
+ dma_wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
@@ -3089,10 +3089,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->dma = -1;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- ret = -ENODEV;
+ if (ret < 0)
goto out_release;
- }
ndev->irq = ret;
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 06dbbe5201cb..50382b1c9ddc 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -285,7 +285,7 @@ enum DMAC_IM_BIT {
/* Receive descriptor bit */
enum RD_STS_BIT {
- RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
+ RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..e9f2349e98bc 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -36,7 +36,7 @@
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>
#include "rocker.h"
@@ -152,8 +152,9 @@ struct rocker_fdb_tbl_entry {
struct hlist_node entry;
u32 key_crc32; /* key */
bool learned;
+ unsigned long touched;
struct rocker_fdb_tbl_key {
- u32 pport;
+ struct rocker_port *rocker_port;
u8 addr[ETH_ALEN];
__be16 vlan_id;
} key;
@@ -202,6 +203,7 @@ enum {
ROCKER_CTRL_IPV4_MCAST,
ROCKER_CTRL_IPV6_MCAST,
ROCKER_CTRL_DFLT_BRIDGING,
+ ROCKER_CTRL_DFLT_OVS,
ROCKER_CTRL_MAX,
};
@@ -219,13 +221,13 @@ struct rocker_port {
__be16 internal_vlan_id;
int stp_state;
u32 brport_flags;
+ unsigned long ageing_time;
bool ctrls[ROCKER_CTRL_MAX];
unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
struct napi_struct napi_tx;
struct napi_struct napi_rx;
struct rocker_dma_ring_info tx_ring;
struct rocker_dma_ring_info rx_ring;
- struct list_head trans_mem;
};
struct rocker {
@@ -245,6 +247,7 @@ struct rocker {
u64 flow_tbl_next_cookie;
DECLARE_HASHTABLE(group_tbl, 16);
spinlock_t group_tbl_lock; /* for group tbl accesses */
+ struct timer_list fdb_cleanup_timer;
DECLARE_HASHTABLE(fdb_tbl, 16);
spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
@@ -323,7 +326,14 @@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
- return !!rocker_port->bridge_dev;
+ return rocker_port->bridge_dev &&
+ netif_is_bridge_master(rocker_port->bridge_dev);
+}
+
+static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
+{
+ return rocker_port->bridge_dev &&
+ netif_is_ovs_master(rocker_port->bridge_dev);
}
#define ROCKER_OP_FLAG_REMOVE BIT(0)
@@ -332,74 +342,63 @@ static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
#define ROCKER_OP_FLAG_REFRESH BIT(3)
static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
size_t size)
{
- struct list_head *elem = NULL;
+ struct switchdev_trans_item *elem = NULL;
gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
GFP_ATOMIC : GFP_KERNEL;
/* If in transaction prepare phase, allocate the memory
- * and enqueue it on a per-port list. If in transaction
- * commit phase, dequeue the memory from the per-port list
+ * and enqueue it on a transaction. If in transaction
+ * commit phase, dequeue the memory from the transaction
* rather than re-allocating the memory. The idea is the
* driver code paths for prepare and commit are identical
* so the memory allocated in the prepare phase is the
* memory used in the commit phase.
*/
- switch (trans) {
- case SWITCHDEV_TRANS_PREPARE:
+ if (!trans) {
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
+ } else if (switchdev_trans_ph_prepare(trans)) {
elem = kzalloc(size + sizeof(*elem), gfp_flags);
if (!elem)
return NULL;
- list_add_tail(elem, &rocker_port->trans_mem);
- break;
- case SWITCHDEV_TRANS_COMMIT:
- BUG_ON(list_empty(&rocker_port->trans_mem));
- elem = rocker_port->trans_mem.next;
- list_del_init(elem);
- break;
- case SWITCHDEV_TRANS_NONE:
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- if (elem)
- INIT_LIST_HEAD(elem);
- break;
- default:
- break;
+ switchdev_trans_item_enqueue(trans, elem, kfree, elem);
+ } else {
+ elem = switchdev_trans_item_dequeue(trans);
}
return elem ? elem + 1 : NULL;
}
static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
size_t size)
{
return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}
static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
size_t n, size_t size)
{
return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}
-static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
{
- struct list_head *elem;
+ struct switchdev_trans_item *elem;
/* Frees are ignored if in transaction prepare phase. The
* memory remains on the per-port list until freed in the
* commit phase.
*/
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
return;
- elem = (struct list_head *)mem - 1;
- BUG_ON(!list_empty(elem));
+	elem = (struct switchdev_trans_item *)mem - 1;
kfree(elem);
}
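The prepare/commit dance guarantees the commit phase cannot fail on allocation: everything is allocated during prepare and queued on the transaction, then dequeued (not reallocated) during commit. Condensed from the hunk above:

if (!trans)				/* no transaction: plain alloc */
	elem = kzalloc(size + sizeof(*elem), gfp_flags);
else if (switchdev_trans_ph_prepare(trans)) {
	elem = kzalloc(size + sizeof(*elem), gfp_flags);
	if (elem)			/* queued; kfree'd if the tx aborts */
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
} else					/* commit: reuse prepared memory */
	elem = switchdev_trans_item_dequeue(trans);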
@@ -422,7 +421,7 @@ static void rocker_wait_init(struct rocker_wait *wait)
}
static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags)
{
struct rocker_wait *wait;
@@ -434,7 +433,7 @@ static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
return wait;
}
-static void rocker_wait_destroy(enum switchdev_trans trans,
+static void rocker_wait_destroy(struct switchdev_trans *trans,
struct rocker_wait *wait)
{
rocker_port_kfree(trans, wait);
@@ -1400,7 +1399,7 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
wait = rocker_desc_cookie_ptr_get(desc_info);
if (wait->nowait) {
rocker_desc_gen_clear(desc_info);
- rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
+ rocker_wait_destroy(NULL, wait);
} else {
rocker_wait_wake_up(wait);
}
@@ -1455,7 +1454,7 @@ static int rocker_event_link_change(const struct rocker *rocker,
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
const unsigned char *addr,
__be16 vlan_id, int flags);
@@ -1488,8 +1487,7 @@ static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
rocker_port->stp_state != BR_STATE_FORWARDING)
return 0;
- return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
- addr, vlan_id, flags);
+ return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}
static int rocker_event_process(const struct rocker *rocker,
@@ -1574,7 +1572,7 @@ typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
void *priv);
static int rocker_cmd_exec(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
rocker_cmd_prep_cb_t prepare, void *prepare_priv,
rocker_cmd_proc_cb_t process, void *process_priv)
{
@@ -1607,7 +1605,7 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port,
rocker_desc_cookie_ptr_set(desc_info, wait);
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
@@ -1615,7 +1613,7 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port,
if (nowait)
return 0;
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
if (!rocker_wait_event_timeout(wait, HZ / 10))
return -EIO;
@@ -1818,6 +1816,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
}
static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ int mtu = *(int *)priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+ mtu))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
@@ -1843,7 +1865,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_ethtool_proc,
ecmd);
@@ -1852,7 +1874,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_macaddr_proc,
macaddr);
@@ -1861,7 +1883,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_set_port_settings_ethtool_prep,
ecmd, NULL, NULL);
}
@@ -1869,13 +1891,21 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_set_port_settings_macaddr_prep,
macaddr, NULL, NULL);
}
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+ int mtu)
+{
+ return rocker_cmd_exec(rocker_port, NULL, 0,
+ rocker_cmd_set_port_settings_mtu_prep,
+ &mtu, NULL, NULL);
+}
+
static int rocker_port_set_learning(struct rocker_port *rocker_port,
- enum switchdev_trans trans)
+ struct switchdev_trans *trans)
{
return rocker_cmd_exec(rocker_port, trans, 0,
rocker_cmd_set_port_learning_prep,
@@ -2393,7 +2423,7 @@ rocker_flow_tbl_find(const struct rocker *rocker,
}
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2409,7 +2439,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
if (found) {
match->cookie = found->cookie;
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
rocker_port_kfree(trans, found);
found = match;
@@ -2420,7 +2450,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
@@ -2430,7 +2460,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2446,7 +2476,7 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
found = rocker_flow_tbl_find(rocker, match);
if (found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
@@ -2466,7 +2496,7 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_flow_tbl_entry *entry)
{
if (flags & ROCKER_OP_FLAG_REMOVE)
@@ -2476,7 +2506,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
@@ -2496,7 +2526,7 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 in_pport, __be16 vlan_id,
__be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2522,7 +2552,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2559,7 +2589,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const u8 *eth_dst, const u8 *eth_dst_mask,
__be16 vlan_id, u32 tunnel_id,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2613,7 +2643,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2639,7 +2669,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
}
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 in_pport, u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
@@ -2704,7 +2734,7 @@ rocker_group_tbl_find(const struct rocker *rocker,
return NULL;
}
-static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
struct rocker_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
@@ -2719,7 +2749,7 @@ static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2731,7 +2761,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
rocker_group_tbl_entry_free(trans, found);
found = match;
@@ -2741,7 +2771,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
}
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_add(rocker->group_tbl, &found->entry, found->group_id);
spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
@@ -2751,7 +2781,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2764,7 +2794,7 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
@@ -2784,7 +2814,7 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
}
static int rocker_group_tbl_do(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
struct rocker_group_tbl_entry *entry)
{
if (flags & ROCKER_OP_FLAG_REMOVE)
@@ -2794,7 +2824,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port,
}
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id, u32 out_pport,
int pop_vlan)
{
@@ -2811,7 +2841,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port,
}
static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags, u8 group_count,
const u32 *group_ids, u32 group_id)
{
@@ -2836,7 +2866,7 @@ static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
}
static int rocker_group_l2_flood(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id, u8 group_count,
const u32 *group_ids, u32 group_id)
{
@@ -2846,7 +2876,7 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port,
}
static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u32 index, const u8 *src_mac, const u8 *dst_mac,
__be16 vlan_id, bool ttl_check, u32 pport)
{
@@ -2882,22 +2912,22 @@ rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
}
static void _rocker_neigh_add(struct rocker *rocker,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
struct rocker_neigh_tbl_entry *entry)
{
- if (trans != SWITCHDEV_TRANS_COMMIT)
+ if (!switchdev_trans_ph_commit(trans))
entry->index = rocker->neigh_tbl_next_index++;
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
return;
entry->ref_count++;
hash_add(rocker->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
}
-static void _rocker_neigh_del(enum switchdev_trans trans,
+static void _rocker_neigh_del(struct switchdev_trans *trans,
struct rocker_neigh_tbl_entry *entry)
{
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
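A summary of how the two predicates behave for each kind of caller,
which explains the asymmetry in _rocker_neigh_add() above (a worked
summary, not code from the patch):

	/*
	 *   trans argument     ph_prepare()   ph_commit()
	 *   NULL (no trans.)   false          false
	 *   prepare phase      true           false
	 *   commit phase       false          true
	 *
	 * !switchdev_trans_ph_commit(trans) is therefore true both with
	 * no transaction and during prepare, so the index is reserved in
	 * both of those cases, while the hash insert and ref_count bump
	 * are skipped only during prepare.
	 */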
@@ -2906,19 +2936,19 @@ static void _rocker_neigh_del(enum switchdev_trans trans,
}
static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = ttl_check;
- } else if (trans != SWITCHDEV_TRANS_PREPARE) {
+ } else if (!switchdev_trans_ph_prepare(trans)) {
entry->ref_count++;
}
}
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct rocker *rocker = rocker_port->rocker;
@@ -3010,7 +3040,8 @@ err_out:
}
static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
- enum switchdev_trans trans, __be32 ip_addr)
+ struct switchdev_trans *trans,
+ __be32 ip_addr)
{
struct net_device *dev = rocker_port->dev;
struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -3038,7 +3069,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
}
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be32 ip_addr, u32 *index)
{
struct rocker *rocker = rocker_port->rocker;
@@ -3097,7 +3128,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
}
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
int flags, __be16 vlan_id)
{
struct rocker_port *p;
@@ -3146,7 +3177,7 @@ no_ports_in_vlan:
}
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id, bool pop_vlan)
{
const struct rocker *rocker = rocker_port->rocker;
@@ -3243,10 +3274,16 @@ static struct rocker_ctrl {
.bridge = true,
.copy_to_cpu = true,
},
+ [ROCKER_CTRL_DFLT_OVS] = {
+ /* pass all pkts up to CPU */
+ .eth_dst = zero_mac,
+ .eth_dst_mask = zero_mac,
+ .acl = true,
+ },
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = rocker_port->pport;
@@ -3279,7 +3316,8 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans,
+ int flags,
const struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
@@ -3304,7 +3342,7 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
@@ -3328,7 +3366,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
@@ -3346,7 +3384,7 @@ static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id)
{
int err = 0;
@@ -3365,7 +3403,7 @@ static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const struct rocker_ctrl *ctrl)
{
u16 vid;
@@ -3384,7 +3422,7 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port,
}
static int rocker_port_vlan(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags, u16 vid)
+ struct switchdev_trans *trans, int flags, u16 vid)
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3441,14 +3479,14 @@ static int rocker_port_vlan(struct rocker_port *rocker_port,
"Error (%d) port VLAN table\n", err);
err_out:
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
return err;
}
static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
u32 in_pport;
@@ -3476,7 +3514,7 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
struct rocker_fdb_learn_work {
struct work_struct work;
struct rocker_port *rocker_port;
- enum switchdev_trans trans;
+ struct switchdev_trans *trans;
int flags;
u8 addr[ETH_ALEN];
u16 vid;
@@ -3504,7 +3542,7 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
}
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
const u8 *addr, __be16 vlan_id)
{
struct rocker_fdb_learn_work *lw;
@@ -3546,7 +3584,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
ether_addr_copy(lw->addr, addr);
lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
rocker_port_kfree(trans, lw);
else
schedule_work(&lw->work);
@@ -3568,7 +3606,7 @@ rocker_fdb_tbl_find(const struct rocker *rocker,
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
const unsigned char *addr,
__be16 vlan_id, int flags)
{
@@ -3583,7 +3621,8 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
return -ENOMEM;
fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
- fdb->key.pport = rocker_port->pport;
+ fdb->touched = jiffies;
+ fdb->key.rocker_port = rocker_port;
ether_addr_copy(fdb->key.addr, addr);
fdb->key.vlan_id = vlan_id;
fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
@@ -3592,13 +3631,17 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
found = rocker_fdb_tbl_find(rocker, fdb);
- if (removing && found) {
- rocker_port_kfree(trans, fdb);
- if (trans != SWITCHDEV_TRANS_PREPARE)
- hash_del(&found->entry);
- } else if (!removing && !found) {
- if (trans != SWITCHDEV_TRANS_PREPARE)
- hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+ if (found) {
+ found->touched = jiffies;
+ if (removing) {
+ rocker_port_kfree(trans, fdb);
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ }
+ } else if (!removing) {
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_add(rocker->fdb_tbl, &fdb->entry,
+ fdb->key_crc32);
}
spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
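The new touched timestamp feeds the ageing logic added further down; a
minimal sketch of the expiry test it enables, assuming the jiffies
bookkeeping used elsewhere in this patch:

	/* time_before_eq() compares jiffies deltas, so wraparound of
	 * the counter is handled correctly.
	 */
	bool expired = time_before_eq(entry->touched +
				      rocker_port->ageing_time, jiffies);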
@@ -3616,7 +3659,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
}
static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_fdb_tbl_entry *found;
@@ -3629,12 +3672,12 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
rocker_port->stp_state == BR_STATE_FORWARDING)
return 0;
- flags |= ROCKER_OP_FLAG_REMOVE;
+ flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.pport != rocker_port->pport)
+ if (found->key.rocker_port != rocker_port)
continue;
if (!found->learned)
continue;
@@ -3643,7 +3686,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
found->key.vlan_id);
if (err)
goto err_out;
- if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!switchdev_trans_ph_prepare(trans))
hash_del(&found->entry);
}
@@ -3653,8 +3696,43 @@ err_out:
return err;
}
+static void rocker_fdb_cleanup(unsigned long data)
+{
+ struct rocker *rocker = (struct rocker *)data;
+ struct rocker_port *rocker_port;
+ struct rocker_fdb_tbl_entry *entry;
+ struct hlist_node *tmp;
+ unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
+ unsigned long expires;
+ unsigned long lock_flags;
+ int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
+ ROCKER_OP_FLAG_LEARNED;
+ int bkt;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
+ if (!entry->learned)
+ continue;
+ rocker_port = entry->key.rocker_port;
+ expires = entry->touched + rocker_port->ageing_time;
+ if (time_before_eq(expires, jiffies)) {
+ rocker_port_fdb_learn(rocker_port, NULL,
+ flags, entry->key.addr,
+ entry->key.vlan_id);
+ hash_del(&entry->entry);
+ } else if (time_before(expires, next_timer)) {
+ next_timer = expires;
+ }
+ }
+
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
+
+ mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
+}
+
static int rocker_port_router_mac(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
__be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
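The cleanup timer introduced above uses the pre-timer_setup() API; its
whole lifecycle, collected from the probe and remove hunks later in
this patch:

	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long)rocker);		/* at probe */
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);	/* first pass now */
	/* rocker_fdb_cleanup() then re-arms itself via mod_timer() */
	del_timer_sync(&rocker->fdb_cleanup_timer);	/* at remove */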
@@ -3687,7 +3765,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
}
static int rocker_port_fwding(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
bool pop_vlan;
u32 out_pport;
@@ -3726,16 +3804,16 @@ static int rocker_port_fwding(struct rocker_port *rocker_port,
}
static int rocker_port_stp_update(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags,
+ struct switchdev_trans *trans, int flags,
u8 state)
{
bool want[ROCKER_CTRL_MAX] = { 0, };
bool prev_ctrls[ROCKER_CTRL_MAX];
- u8 prev_state;
+ u8 uninitialized_var(prev_state);
int err;
int i;
- if (trans == SWITCHDEV_TRANS_PREPARE) {
+ if (switchdev_trans_ph_prepare(trans)) {
memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
prev_state = rocker_port->stp_state;
}
@@ -3755,11 +3833,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
break;
case BR_STATE_LEARNING:
case BR_STATE_FORWARDING:
- want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+ if (!rocker_port_is_ovsed(rocker_port))
+ want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
want[ROCKER_CTRL_IPV4_MCAST] = true;
want[ROCKER_CTRL_IPV6_MCAST] = true;
if (rocker_port_is_bridged(rocker_port))
want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+ else if (rocker_port_is_ovsed(rocker_port))
+ want[ROCKER_CTRL_DFLT_OVS] = true;
else
want[ROCKER_CTRL_LOCAL_ARP] = true;
break;
@@ -3784,7 +3865,7 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
err = rocker_port_fwding(rocker_port, trans, flags);
err_out:
- if (trans == SWITCHDEV_TRANS_PREPARE) {
+ if (switchdev_trans_ph_prepare(trans)) {
memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
rocker_port->stp_state = prev_state;
}
@@ -3793,7 +3874,7 @@ err_out:
}
static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will enable port */
@@ -3805,7 +3886,7 @@ static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
}
static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
- enum switchdev_trans trans, int flags)
+ struct switchdev_trans *trans, int flags)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will disable port */
@@ -3903,7 +3984,7 @@ not_found:
}
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
- enum switchdev_trans trans, __be32 dst,
+ struct switchdev_trans *trans, __be32 dst,
int dst_len, const struct fib_info *fi,
u32 tb_id, int flags)
{
@@ -3977,13 +4058,14 @@ static int rocker_port_open(struct net_device *dev)
goto err_request_rx_irq;
}
- err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_fwd_enable(rocker_port, NULL, 0);
if (err)
goto err_fwd_enable;
napi_enable(&rocker_port->napi_tx);
napi_enable(&rocker_port->napi_rx);
- rocker_port_set_enable(rocker_port, true);
+ if (!dev->proto_down)
+ rocker_port_set_enable(rocker_port, true);
netif_start_queue(dev);
return 0;
@@ -4004,7 +4086,7 @@ static int rocker_port_stop(struct net_device *dev)
rocker_port_set_enable(rocker_port, false);
napi_disable(&rocker_port->napi_rx);
napi_disable(&rocker_port->napi_tx);
- rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
+ rocker_port_fwd_disable(rocker_port, NULL,
ROCKER_OP_FLAG_NOWAIT);
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
@@ -4102,8 +4184,11 @@ static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data, skb_headlen(skb));
if (err)
goto nest_cancel;
- if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
- goto nest_cancel;
+ if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
+ err = skb_linearize(skb);
+ if (err)
+ goto unmap_frags;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
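The fallback pattern used above, shown in isolation (a sketch, not the
driver code; the error label is hypothetical): rather than dropping an
skb that carries more page fragments than the ring can describe, it is
collapsed into a single linear buffer.

	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		if (skb_linearize(skb))	/* returns -ENOMEM on failure */
			goto drop;	/* hypothetical label */
	}

This pairs with the NETIF_F_SG feature flag enabled in the probe hunk
further down.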
@@ -4152,6 +4237,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int running = netif_running(dev);
+ int err;
+
+#define ROCKER_PORT_MIN_MTU 68
+#define ROCKER_PORT_MAX_MTU 9000
+
+ if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+ return -EINVAL;
+
+ if (running)
+ rocker_port_stop(dev);
+
+ netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+ if (err)
+ return err;
+
+ if (running)
+ err = rocker_port_open(dev);
+
+ return err;
+}
+
static int rocker_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
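With .ndo_change_mtu wired into rocker_port_netdev_ops below, the new
path can be exercised from userspace with e.g. "ip link set dev sw1p1
mtu 9000" (sw1p1 being a hypothetical rocker port name); values outside
the 68..9000 window above are rejected with -EINVAL before the port is
stopped or reprogrammed.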
@@ -4159,7 +4272,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ err = rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_phys_name_proc,
&name);
@@ -4167,11 +4280,33 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
return err ? -EOPNOTSUPP : 0;
}
+static int rocker_port_change_proto_down(struct net_device *dev,
+ bool proto_down)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ if (rocker_port->dev->flags & IFF_UP)
+ rocker_port_set_enable(rocker_port, !proto_down);
+ rocker_port->dev->proto_down = proto_down;
+ return 0;
+}
+
+static void rocker_port_neigh_destroy(struct neighbour *n)
+{
+ struct rocker_port *rocker_port = netdev_priv(n->dev);
+ int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
+ __be32 ip_addr = *(__be32 *)n->primary_key;
+
+ rocker_port_ipv4_neigh(rocker_port, NULL,
+ flags, ip_addr, n->ha);
+}
+
static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_open = rocker_port_open,
.ndo_stop = rocker_port_stop,
.ndo_start_xmit = rocker_port_xmit,
.ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_change_mtu = rocker_port_change_mtu,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
@@ -4179,6 +4314,8 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_fdb_del = switchdev_port_fdb_del,
.ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+ .ndo_change_proto_down = rocker_port_change_proto_down,
+ .ndo_neigh_destroy = rocker_port_neigh_destroy,
};
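Two new ndo hooks land in this struct. .ndo_change_proto_down lets
userspace ("ip link set dev sw1p1 protodown on", port name again
hypothetical) ask the driver to take the link down while the netdev
stays administratively up; rocker maps it directly onto
rocker_port_set_enable(), and rocker_port_open() above honours the flag
as well. .ndo_neigh_destroy gives the driver a chance to remove its
offloaded neighbour entry when the kernel frees a neighbour.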
/********************
@@ -4192,11 +4329,11 @@ static int rocker_port_attr_get(struct net_device *dev,
const struct rocker *rocker = rocker_port->rocker;
switch (attr->id) {
- case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = sizeof(rocker->hw.id);
memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
break;
- case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
attr->u.brport_flags = rocker_port->brport_flags;
break;
default:
@@ -4206,18 +4343,8 @@ static int rocker_port_attr_get(struct net_device *dev,
return 0;
}
-static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
-{
- struct list_head *mem, *tmp;
-
- list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
- list_del(mem);
- kfree(mem);
- }
-}
-
static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+ struct switchdev_trans *trans,
unsigned long brport_flags)
{
unsigned long orig_flags;
@@ -4228,39 +4355,44 @@ static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
err = rocker_port_set_learning(rocker_port, trans);
- if (trans == SWITCHDEV_TRANS_PREPARE)
+ if (switchdev_trans_ph_prepare(trans))
rocker_port->brport_flags = orig_flags;
return err;
}
+static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
+ struct switchdev_trans *trans,
+ u32 ageing_time)
+{
+ if (!switchdev_trans_ph_prepare(trans)) {
+ rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
+ }
+
+ return 0;
+}
+
static int rocker_port_attr_set(struct net_device *dev,
- struct switchdev_attr *attr)
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
- switch (attr->trans) {
- case SWITCHDEV_TRANS_PREPARE:
- BUG_ON(!list_empty(&rocker_port->trans_mem));
- break;
- case SWITCHDEV_TRANS_ABORT:
- rocker_port_trans_abort(rocker_port);
- return 0;
- default:
- break;
- }
-
switch (attr->id) {
- case SWITCHDEV_ATTR_PORT_STP_STATE:
- err = rocker_port_stp_update(rocker_port, attr->trans,
- ROCKER_OP_FLAG_NOWAIT,
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = rocker_port_stp_update(rocker_port, trans, 0,
attr->u.stp_state);
break;
- case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
- err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = rocker_port_brport_flags_set(rocker_port, trans,
attr->u.brport_flags);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = rocker_port_bridge_ageing_time(rocker_port, trans,
+ attr->u.ageing_time);
+ break;
default:
err = -EOPNOTSUPP;
break;
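The trans argument now arrives from the switchdev core, which invokes
the driver op twice; a hypothetical sketch of the caller side, under
the assumption that it matches the 4.3-era switchdev core:

	struct switchdev_trans trans;
	int err;

	trans.ph_prepare = true;
	err = ops->switchdev_port_attr_set(dev, attr, &trans);
	if (err)
		return err;	/* nothing has been committed yet */

	trans.ph_prepare = false;
	err = ops->switchdev_port_attr_set(dev, attr, &trans);
	WARN(err, "commit phase is expected not to fail");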
@@ -4270,7 +4402,8 @@ static int rocker_port_attr_set(struct net_device *dev,
}
static int rocker_port_vlan_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans, u16 vid, u16 flags)
+ struct switchdev_trans *trans,
+ u16 vid, u16 flags)
{
int err;
@@ -4289,8 +4422,8 @@ static int rocker_port_vlan_add(struct rocker_port *rocker_port,
}
static int rocker_port_vlans_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
- const struct switchdev_obj_vlan *vlan)
+ struct switchdev_trans *trans,
+ const struct switchdev_obj_port_vlan *vlan)
{
u16 vid;
int err;
@@ -4306,8 +4439,8 @@ static int rocker_port_vlans_add(struct rocker_port *rocker_port,
}
static int rocker_port_fdb_add(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
- const struct switchdev_obj_fdb *fdb)
+ struct switchdev_trans *trans,
+ const struct switchdev_obj_port_fdb *fdb)
{
__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
int flags = 0;
@@ -4319,36 +4452,27 @@ static int rocker_port_fdb_add(struct rocker_port *rocker_port,
}
static int rocker_port_obj_add(struct net_device *dev,
- struct switchdev_obj *obj)
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
{
struct rocker_port *rocker_port = netdev_priv(dev);
const struct switchdev_obj_ipv4_fib *fib4;
int err = 0;
- switch (obj->trans) {
- case SWITCHDEV_TRANS_PREPARE:
- BUG_ON(!list_empty(&rocker_port->trans_mem));
- break;
- case SWITCHDEV_TRANS_ABORT:
- rocker_port_trans_abort(rocker_port);
- return 0;
- default:
- break;
- }
-
switch (obj->id) {
- case SWITCHDEV_OBJ_PORT_VLAN:
- err = rocker_port_vlans_add(rocker_port, obj->trans,
- &obj->u.vlan);
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_port_vlans_add(rocker_port, trans,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_IPV4_FIB:
- fib4 = &obj->u.ipv4_fib;
- err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+ err = rocker_port_fib_ipv4(rocker_port, trans,
htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id, 0);
+ &fib4->fi, fib4->tb_id, 0);
break;
- case SWITCHDEV_OBJ_PORT_FDB:
- err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_port_fdb_add(rocker_port, trans,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
break;
default:
err = -EOPNOTSUPP;
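The SWITCHDEV_OBJ_* accessors used above are container_of() wrappers,
roughly as defined in include/net/switchdev.h for this series, with
each typed object embedding a struct switchdev_obj named obj:

	#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
		container_of(obj, struct switchdev_obj_port_vlan, obj)
	#define SWITCHDEV_OBJ_PORT_FDB(obj) \
		container_of(obj, struct switchdev_obj_port_fdb, obj)
	#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
		container_of(obj, struct switchdev_obj_ipv4_fib, obj)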
@@ -4363,17 +4487,17 @@ static int rocker_port_vlan_del(struct rocker_port *rocker_port,
{
int err;
- err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+ err = rocker_port_router_mac(rocker_port, NULL,
ROCKER_OP_FLAG_REMOVE, htons(vid));
if (err)
return err;
- return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+ return rocker_port_vlan(rocker_port, NULL,
ROCKER_OP_FLAG_REMOVE, vid);
}
static int rocker_port_vlans_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_vlan *vlan)
+ const struct switchdev_obj_port_vlan *vlan)
{
u16 vid;
int err;
@@ -4388,11 +4512,11 @@ static int rocker_port_vlans_del(struct rocker_port *rocker_port,
}
static int rocker_port_fdb_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
- const struct switchdev_obj_fdb *fdb)
+ struct switchdev_trans *trans,
+ const struct switchdev_obj_port_fdb *fdb)
{
__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+ int flags = ROCKER_OP_FLAG_REMOVE;
if (!rocker_port_is_bridged(rocker_port))
return -EINVAL;
@@ -4401,25 +4525,27 @@ static int rocker_port_fdb_del(struct rocker_port *rocker_port,
}
static int rocker_port_obj_del(struct net_device *dev,
- struct switchdev_obj *obj)
+ const struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
const struct switchdev_obj_ipv4_fib *fib4;
int err = 0;
switch (obj->id) {
- case SWITCHDEV_OBJ_PORT_VLAN:
- err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_port_vlans_del(rocker_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
- case SWITCHDEV_OBJ_IPV4_FIB:
- fib4 = &obj->u.ipv4_fib;
- err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+ err = rocker_port_fib_ipv4(rocker_port, NULL,
htonl(fib4->dst), fib4->dst_len,
- fib4->fi, fib4->tb_id,
+ &fib4->fi, fib4->tb_id,
ROCKER_OP_FLAG_REMOVE);
break;
- case SWITCHDEV_OBJ_PORT_FDB:
- err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_port_fdb_del(rocker_port, NULL,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -4430,10 +4556,10 @@ static int rocker_port_obj_del(struct net_device *dev,
}
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj *obj)
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
{
struct rocker *rocker = rocker_port->rocker;
- struct switchdev_obj_fdb *fdb = &obj->u.fdb;
struct rocker_fdb_tbl_entry *found;
struct hlist_node *tmp;
unsigned long lock_flags;
@@ -4442,12 +4568,13 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.pport != rocker_port->pport)
+ if (found->key.rocker_port != rocker_port)
continue;
- fdb->addr = found->key.addr;
+ ether_addr_copy(fdb->addr, found->key.addr);
+ fdb->ndm_state = NUD_REACHABLE;
fdb->vid = rocker_port_vlan_to_vid(rocker_port,
found->key.vlan_id);
- err = obj->cb(rocker_port->dev, obj);
+ err = cb(&fdb->obj);
if (err)
break;
}
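The dump path now iterates with a caller-supplied callback instead of
one hung off the object itself; the callback type, roughly as declared
in the switchdev headers at this point:

	typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);

Each cb(&fdb->obj) / cb(&vlan->obj) call above hands back the embedded
generic object, from which the core recovers the typed one.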
@@ -4457,9 +4584,9 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
}
static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj *obj)
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
{
- struct switchdev_obj_vlan *vlan = &obj->u.vlan;
u16 vid;
int err = 0;
@@ -4470,7 +4597,7 @@ static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
if (rocker_vlan_id_is_internal(htons(vid)))
vlan->flags |= BRIDGE_VLAN_INFO_PVID;
vlan->vid_begin = vlan->vid_end = vid;
- err = obj->cb(rocker_port->dev, obj);
+ err = cb(&vlan->obj);
if (err)
break;
}
@@ -4479,17 +4606,20 @@ static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
}
static int rocker_port_obj_dump(struct net_device *dev,
- struct switchdev_obj *obj)
+ struct switchdev_obj *obj,
+ switchdev_obj_dump_cb_t *cb)
{
const struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
switch (obj->id) {
- case SWITCHDEV_OBJ_PORT_FDB:
- err = rocker_port_fdb_dump(rocker_port, obj);
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_port_fdb_dump(rocker_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj), cb);
break;
- case SWITCHDEV_OBJ_PORT_VLAN:
- err = rocker_port_vlan_dump(rocker_port, obj);
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_port_vlan_dump(rocker_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
break;
default:
err = -EOPNOTSUPP;
@@ -4632,7 +4762,7 @@ rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
void *priv)
{
- return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+ return rocker_cmd_exec(rocker_port, NULL, 0,
rocker_cmd_get_port_stats_prep, NULL,
rocker_cmd_get_port_stats_ethtool_proc,
priv);
@@ -4726,6 +4856,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
size_t rx_len;
+ u16 rx_flags = 0;
if (!skb)
return -ENOENT;
@@ -4733,6 +4864,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
return -EINVAL;
+ if (attrs[ROCKER_TLV_RX_FLAGS])
+ rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
rocker_dma_rx_ring_skb_unmap(rocker, attrs);
@@ -4740,6 +4873,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
skb_put(skb, rx_len);
skb->protocol = eth_type_trans(skb, rocker_port->dev);
+ if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+ skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
rocker_port->dev->stats.rx_packets++;
rocker_port->dev->stats.rx_bytes += skb->len;
@@ -4818,9 +4954,9 @@ static void rocker_remove_ports(const struct rocker *rocker)
rocker_port = rocker->ports[i];
if (!rocker_port)
continue;
- rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE);
+ rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
+ free_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
@@ -4856,7 +4992,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->port_number = port_number;
rocker_port->pport = port_number + 1;
rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
- INIT_LIST_HEAD(&rocker_port->trans_mem);
+ rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
@@ -4868,7 +5004,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
err = register_netdev(dev);
if (err) {
@@ -4877,19 +5013,20 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
}
rocker->ports[port_number] = rocker_port;
- rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
+ switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
+
+ rocker_port_set_learning(rocker_port, NULL);
- err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_ig_tbl(rocker_port, NULL, 0);
if (err) {
- dev_err(&pdev->dev, "install ig port table failed\n");
+ netdev_err(rocker_port->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
}
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
- err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
- untagged_vid, 0);
+ err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
if (err) {
netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
goto err_untagged_vlan;
@@ -4898,9 +5035,9 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
return 0;
err_untagged_vlan:
- rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE);
+ rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
+ rocker->ports[port_number] = NULL;
unregister_netdev(dev);
err_register_netdev:
free_netdev(dev);
@@ -5067,17 +5204,23 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_tbls;
}
+ setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
+ (unsigned long) rocker);
+ mod_timer(&rocker->fdb_cleanup_timer, jiffies);
+
err = rocker_probe_ports(rocker);
if (err) {
dev_err(&pdev->dev, "failed to probe ports\n");
goto err_probe_ports;
}
- dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+ dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
+ (int)sizeof(rocker->hw.id), &rocker->hw.id);
return 0;
err_probe_ports:
+ del_timer_sync(&rocker->fdb_cleanup_timer);
rocker_free_tbls(rocker);
err_init_tbls:
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -5105,6 +5248,7 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ del_timer_sync(&rocker->fdb_cleanup_timer);
rocker_free_tbls(rocker);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
rocker_remove_ports(rocker);
@@ -5156,9 +5300,9 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
rocker_port->bridge_dev = bridge;
+ switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
- return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
- untagged_vid, 0);
+ return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
@@ -5176,60 +5320,91 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
+ switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
+ false);
rocker_port->bridge_dev = NULL;
- err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
- untagged_vid, 0);
+ err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
if (err)
return err;
if (rocker_port->dev->flags & IFF_UP)
- err = rocker_port_fwd_enable(rocker_port,
- SWITCHDEV_TRANS_NONE, 0);
+ err = rocker_port_fwd_enable(rocker_port, NULL, 0);
return err;
}
-static int rocker_port_master_changed(struct net_device *dev)
+
+static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
+ struct net_device *master)
+{
+ int err;
+
+ rocker_port->bridge_dev = master;
+
+ err = rocker_port_fwd_disable(rocker_port, NULL, 0);
+ if (err)
+ return err;
+ err = rocker_port_fwd_enable(rocker_port, NULL, 0);
+
+ return err;
+}
+
+static int rocker_port_master_linked(struct rocker_port *rocker_port,
+ struct net_device *master)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct net_device *master = netdev_master_upper_dev_get(dev);
int err = 0;
- /* There are currently three cases handled here:
- * 1. Joining a bridge
- * 2. Leaving a previously joined bridge
- * 3. Other, e.g. being added to or removed from a bond or openvswitch,
- * in which case nothing is done
- */
- if (master && master->rtnl_link_ops &&
- !strcmp(master->rtnl_link_ops->kind, "bridge"))
+ if (netif_is_bridge_master(master))
err = rocker_port_bridge_join(rocker_port, master);
- else if (rocker_port_is_bridged(rocker_port))
- err = rocker_port_bridge_leave(rocker_port);
+ else if (netif_is_ovs_master(master))
+ err = rocker_port_ovs_changed(rocker_port, master);
+ return err;
+}
+
+static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
+{
+ int err = 0;
+
+ if (rocker_port_is_bridged(rocker_port))
+ err = rocker_port_bridge_leave(rocker_port);
+ else if (rocker_port_is_ovsed(rocker_port))
+ err = rocker_port_ovs_changed(rocker_port, NULL);
return err;
}
static int rocker_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *dev;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct rocker_port *rocker_port;
int err;
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
switch (event) {
case NETDEV_CHANGEUPPER:
- dev = netdev_notifier_info_to_dev(ptr);
- if (!rocker_port_dev_check(dev))
- return NOTIFY_DONE;
- err = rocker_port_master_changed(dev);
- if (err)
- netdev_warn(dev,
- "failed to reflect master change (err %d)\n",
- err);
+ info = ptr;
+ if (!info->master)
+ goto out;
+ rocker_port = netdev_priv(dev);
+ if (info->linking) {
+ err = rocker_port_master_linked(rocker_port,
+ info->upper_dev);
+ if (err)
+ netdev_warn(dev, "failed to reflect master linked (err %d)\n",
+ err);
+ } else {
+ err = rocker_port_master_unlinked(rocker_port);
+ if (err)
+ netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
+ err);
+ }
break;
}
-
+out:
return NOTIFY_DONE;
}
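For reference, the CHANGEUPPER payload consumed above, roughly as
declared in include/linux/netdevice.h at this point:

	struct netdev_notifier_changeupper_info {
		struct netdev_notifier_info info; /* must be first */
		struct net_device *upper_dev;	/* upper being (un)linked */
		bool master;			/* upper is a master device */
		bool linking;		/* true on link, false on unlink */
	};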
@@ -5248,8 +5423,7 @@ static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
ROCKER_OP_FLAG_NOWAIT;
__be32 ip_addr = *(__be32 *)n->primary_key;
- return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
- flags, ip_addr, n->ha);
+ return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index c61fbf968036..12490b2f6504 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -159,6 +159,7 @@ enum {
ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */
__ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -245,6 +246,7 @@ enum {
#define ROCKER_RX_FLAGS_TCP BIT(5)
#define ROCKER_RX_FLAGS_UDP BIT(6)
#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8)
enum {
ROCKER_TLV_TX_UNSPEC,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 605cc8948594..bc6d21b471be 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -49,6 +49,12 @@ enum {
*/
#define HUNT_FILTER_TBL_ROWS 8192
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+struct efx_ef10_dev_addr {
+ u8 addr[ETH_ALEN];
+ u16 id;
+};
+
struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
enum efx_filter_match_flags rx_match_flags[
@@ -69,13 +75,14 @@ struct efx_ef10_filter_table {
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_DEV_UC_MAX 32
#define EFX_EF10_FILTER_DEV_MC_MAX 256
- struct {
- u8 addr[ETH_ALEN];
- u16 id;
- } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
- dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
- int dev_uc_count; /* negative for PROMISC */
- int dev_mc_count; /* negative for PROMISC/ALLMULTI */
+ struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+ struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count;
+ int dev_mc_count;
+/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
+ u16 ucdef_id;
+ u16 bcast_id;
+ u16 mcdef_id;
};
/* An arbitrary search limit for the software hash table */
@@ -288,11 +295,11 @@ static int efx_ef10_probe(struct efx_nic *efx)
/* We can have one VI for each 8K region. However, until we
* use TX option descriptors we need two TX queues per channel.
*/
- efx->max_channels =
- min_t(unsigned int,
- EFX_MAX_CHANNELS,
- efx_ef10_mem_map_size(efx) /
- (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ efx->max_channels = min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ efx_ef10_mem_map_size(efx) /
+ (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ efx->max_tx_channels = efx->max_channels;
if (WARN_ON(efx->max_channels == 0))
return -EIO;
@@ -387,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
* First try to enable it, then if we get EPERM, just
* ask if it's already enabled
*/
- rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
if (rc == 0) {
nic_data->workaround_35388 = true;
} else if (rc == -EPERM) {
@@ -817,11 +824,13 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
unsigned int uc_mem_map_size, wc_mem_map_size;
- unsigned int min_vis, pio_write_vi_base, max_vis;
+ unsigned int min_vis = max(EFX_TXQ_TYPES,
+ efx_separate_tx_channels ? 2 : 1);
+ unsigned int channel_vis, pio_write_vi_base, max_vis;
void __iomem *membase;
int rc;
- min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+ channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef EFX_USE_PIO
/* Try to allocate PIO buffers if wanted and if the full
@@ -855,11 +864,11 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* page size is >4K). So we may allocate some extra VIs just
* for writing PIO buffers through.
*
- * The UC mapping contains (min_vis - 1) complete VIs and the
+ * The UC mapping contains (channel_vis - 1) complete VIs and the
* first half of the next VI. Then the WC mapping begins with
* the second half of this last VI.
*/
- uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+ uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
ER_DZ_TX_PIOBUF);
if (nic_data->n_piobufs) {
/* pio_write_vi_base rounds down to give the number of complete
@@ -874,7 +883,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
} else {
pio_write_vi_base = 0;
wc_mem_map_size = 0;
- max_vis = min_vis;
+ max_vis = channel_vis;
}
/* In case the last attached driver failed to free VIs, do it now */
@@ -886,6 +895,23 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
if (rc != 0)
return rc;
+ if (nic_data->n_allocated_vis < channel_vis) {
+ netif_info(efx, drv, efx->net_dev,
+ "Could not allocate enough VIs to satisfy RSS"
+ " requirements. Performance may not be optimal.\n");
+ /* We didn't get the VIs to populate our channels.
+ * We could keep what we got but then we'd have more
+ * interrupts than we need.
+ * Instead calculate new max_channels and restart
+ */
+ efx->max_channels = nic_data->n_allocated_vis;
+ efx->max_tx_channels =
+ nic_data->n_allocated_vis / EFX_TXQ_TYPES;
+
+ efx_ef10_free_vis(efx);
+ return -EAGAIN;
+ }
+
/* If we didn't get enough VIs to map all the PIO buffers, free the
* PIO buffers
*/
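A hypothetical sketch of the caller-side handling of the -EAGAIN
returned above: efx_probe_nic() in efx.c is expected to tear down the
interrupt setup and retry with the reduced efx->max_channels /
efx->max_tx_channels (the loop shape here is an assumption, not the
literal core code):

	do {
		rc = efx_probe_interrupts(efx);	/* size channels/IRQs */
		if (rc)
			break;
		rc = efx->type->dimension_resources(efx);
		if (rc == -EAGAIN)		/* got fewer VIs */
			efx_remove_interrupts(efx);	/* shrink, retry */
	} while (rc == -EAGAIN);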
@@ -984,12 +1010,24 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+#ifdef CONFIG_SFC_SRIOV
+ unsigned int i;
+#endif
/* All our allocations have been reset */
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* Driver-created vswitches and vports must be re-created */
+ nic_data->must_probe_vswitching = true;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+#ifdef CONFIG_SFC_SRIOV
+ if (nic_data->vf)
+ for (i = 0; i < efx->vf_count; i++)
+ nic_data->vf[i].vport_id = 0;
+#endif
}
static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
@@ -1034,6 +1072,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
int rc = efx_mcdi_reset(efx, reset_type);
+ /* Unprivileged functions return -EPERM, but need to return success
+ * here so that the datapath is brought back up.
+ */
+ if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
+ rc = 0;
+
/* If it was a port reset, trigger reallocation of MC resources.
* Note that on an MC reset nothing needs to be done now because we'll
* detect the MC reset later and handle it then.
@@ -1282,7 +1326,12 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
}
}
- if (core_stats) {
+ if (!core_stats)
+ return stats_count;
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
+ /* Use vadaptor stats. */
core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
stats[EF10_STAT_rx_multicast] +
stats[EF10_STAT_rx_broadcast];
@@ -1302,6 +1351,26 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
core_stats->rx_errors = core_stats->rx_crc_errors;
core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ } else {
+ /* Use port stats. */
+ core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_port_rx_gtjumbo] +
+ stats[EF10_STAT_port_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+ core_stats->rx_frame_errors =
+ stats[EF10_STAT_port_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
}
return stats_count;
@@ -1535,6 +1604,22 @@ efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
memcpy(outbuf, pdu + offset, outlen);
}
+static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* All our allocations have been reset */
+ efx_ef10_reset_mc_allocations(efx);
+
+ /* The datapath firmware might have been changed */
+ nic_data->must_check_datapath_caps = true;
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * statistic that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+}
+
static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -1554,21 +1639,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
return 0;
nic_data->warm_boot_count = rc;
-
- /* All our allocations have been reset */
- efx_ef10_reset_mc_allocations(efx);
-
- /* Driver-created vswitches and vports must be re-created */
- nic_data->must_probe_vswitching = true;
- nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
-
- /* The datapath firmware might have been changed */
- nic_data->must_check_datapath_caps = true;
-
- /* MAC statistics have been cleared on the NIC; clear the local
- * statistic that we update with efx_update_diff_stat().
- */
- nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+ efx_ef10_mcdi_reboot_detected(efx);
return -EIO;
}
@@ -1784,7 +1855,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
unsigned int write_ptr;
efx_qword_t *txd;
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+ tx_queue->xmit_more_available = false;
+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+ return;
do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
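The BUG_ON() above becomes an early return because, with xmit_more,
the enqueue path may defer the doorbell, and tx_write() can then be
called with nothing new to push. A sketch of the enqueue side that
sets the flag (names from this driver; exact placement is an
assumption):

	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
		efx_nic_push_buffers(tx_queue);	/* ring the doorbell */
	else
		tx_queue->xmit_more_available = true;	/* deferred */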
@@ -2197,6 +2270,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel)
GFP_KERNEL);
}
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
static int efx_ef10_ev_init(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf,
@@ -2208,6 +2304,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
struct efx_ef10_nic_data *nic_data;
bool supports_rx_merge;
size_t inlen, outlen;
+ unsigned int enabled, implemented;
dma_addr_t dma_addr;
int rc;
int i;
@@ -2248,30 +2345,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
outbuf, sizeof(outbuf), &outlen);
/* IRQ return is ignored */
- return rc;
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF_ERR(outbuf);
- struct efx_nic *efx = channel->efx;
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
+ if (channel->channel || rc)
+ return rc;
- if (rc && rc != -EALREADY)
+ /* Successfully created event queue on channel 0 */
+ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+ if (rc == -ENOSYS) {
+ /* GET_WORKAROUNDS was implemented before the bug26807
+ * workaround, thus the latter must be unavailable in this fw
+ */
+ nic_data->workaround_26807 = false;
+ rc = 0;
+ } else if (rc) {
goto fail;
+ } else {
+ nic_data->workaround_26807 =
+ !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+ if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
+ !nic_data->workaround_26807) {
+ unsigned int flags;
+
+ rc = efx_mcdi_set_workaround(efx,
+ MC_CMD_WORKAROUND_BUG26807,
+ true, &flags);
+
+ if (!rc) {
+ if (flags &
+ 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+ netif_info(efx, drv, efx->net_dev,
+ "other functions on NIC have been reset\n");
+ /* MC's boot count has incremented */
+ ++nic_data->warm_boot_count;
+ }
+ nic_data->workaround_26807 = true;
+ } else if (rc == -EPERM) {
+ rc = 0;
+ }
+ }
+ }
- return;
+ if (!rc)
+ return 0;
fail:
- efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
- outbuf, outlen, rc);
+ efx_ef10_ev_fini(channel);
+ return rc;
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -3225,6 +3344,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
filter_id, false);
}
+static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
+{
+ return filter_id % HUNT_FILTER_TBL_ROWS;
+}
+
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, true);
+}
+
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec)
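How the two id spaces relate, based on the insert path elsewhere in
this file: efx_ef10_filter_insert() returns

	filter_id = match_pri * HUNT_FILTER_TBL_ROWS + filter_idx;

so the "unsafe" form above keeps only the table row and drops the
priority component. It is unsafe because a stale row may by then hold
a different filter; the address-list bookkeeping added below tolerates
that.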
@@ -3598,6 +3730,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
goto fail;
}
+ table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+
efx->filter_state = table;
init_waitqueue_head(&table->waitq);
return 0;
@@ -3700,145 +3836,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
+ if (id != EFX_EF10_FILTER_ID_INVALID) { \
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+ WARN_ON(!table->entry[filter_idx].spec); \
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
+ }
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- struct net_device *net_dev = efx->net_dev;
- struct efx_filter_spec spec;
- bool remove_failed = false;
- struct netdev_hw_addr *uc;
- struct netdev_hw_addr *mc;
- unsigned int filter_idx;
- int i, n, rc;
-
- if (!efx_dev_registered(efx))
- return;
+ unsigned int filter_idx, i;
if (!table)
return;
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
- n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
- for (i = 0; i < n; i++) {
- filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- }
- n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
- for (i = 0; i < n; i++) {
- filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
- }
+ for (i = 0; i < table->dev_uc_count; i++)
+ EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+ for (i = 0; i < table->dev_mc_count; i++)
+ EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
+ EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
spin_unlock_bh(&efx->filter_lock);
+}
+#undef EFX_EF10_FILTER_DO_MARK_OLD
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
- */
- netif_addr_lock_bh(net_dev);
- if (net_dev->flags & IFF_PROMISC ||
- netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
- table->dev_uc_count = -1;
- } else {
- table->dev_uc_count = 1 + netdev_uc_count(net_dev);
- ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
- i = 1;
- netdev_for_each_uc_addr(uc, net_dev) {
- ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- i++;
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *uc;
+ int addr_count;
+ unsigned int i;
+
+ table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+ addr_count = netdev_uc_count(net_dev);
+ if (net_dev->flags & IFF_PROMISC)
+ *promisc = true;
+ table->dev_uc_count = 1 + addr_count;
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ *promisc = true;
+ break;
}
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+ table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+ i++;
}
- if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
- netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
- table->dev_mc_count = -1;
- } else {
- table->dev_mc_count = 1 + netdev_mc_count(net_dev);
- eth_broadcast_addr(table->dev_mc_list[0].addr);
- i = 1;
- netdev_for_each_mc_addr(mc, net_dev) {
- ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- i++;
+}
+
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *mc;
+ unsigned int i, addr_count;
+
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+ *promisc = true;
+
+ addr_count = netdev_mc_count(net_dev);
+ i = 0;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ *promisc = true;
+ break;
}
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+ table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+ i++;
}
- netif_addr_unlock_bh(net_dev);
- /* Insert/renew unicast filters */
- if (table->dev_uc_count >= 0) {
- for (i = 0; i < table->dev_uc_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->dev_uc_list[i].addr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
- if (rc < 0) {
- /* Fall back to unicast-promisc */
- while (i--)
- efx_ef10_filter_remove_safe(
+ table->dev_mc_count = i;
+}
+
+static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
+ bool multicast, bool rollback)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_dev_addr *addr_list;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ unsigned int i, j;
+ int addr_count;
+ int rc;
+
+ if (multicast) {
+ addr_list = table->dev_mc_list;
+ addr_count = table->dev_mc_count;
+ } else {
+ addr_list = table->dev_uc_list;
+ addr_count = table->dev_uc_count;
+ }
+
+ /* Insert/renew filters */
+ for (i = 0; i < addr_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ addr_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ if (rollback) {
+ netif_info(efx, drv, efx->net_dev,
+ "efx_ef10_filter_insert failed rc=%d\n",
+ rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+ continue;
+ efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- table->dev_uc_list[i].id);
- table->dev_uc_count = -1;
- break;
+ addr_list[j].id);
+ addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
+ } else {
+ /* mark as not inserted, and carry on */
+ rc = EFX_EF10_FILTER_ID_INVALID;
}
- table->dev_uc_list[i].id = rc;
}
+ addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
- if (table->dev_uc_count < 0) {
+
+ if (multicast && rollback) {
+ /* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
EFX_FILTER_FLAG_RX_RSS,
0);
- efx_filter_set_uc_def(&spec);
+ eth_broadcast_addr(baddr);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- WARN_ON(1);
- table->dev_uc_count = 0;
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n", rc);
+ /* Fall back to promiscuous */
+ for (j = 0; j < i; j++) {
+ if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+ continue;
+ efx_ef10_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ addr_list[j].id);
+ addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ }
+ return rc;
} else {
- table->dev_uc_list[0].id = rc;
+ table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
- /* Insert/renew multicast filters */
- if (table->dev_mc_count >= 0) {
- for (i = 0; i < table->dev_mc_count; i++) {
+ return 0;
+}
+
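The rollback parameter above selects between two failure policies: undo everything inserted so far and report the error, or mark the failed slot invalid and keep going. A minimal runnable model of that pattern (insert(), remove_id() and the ID values are made up):

#include <stdbool.h>
#include <stdio.h>

#define ID_INVALID 0xffff

static int insert(int i)            /* pretend slot 3 fails to insert */
{
	return i == 3 ? -1 : 100 + i;
}

static void remove_id(int id)
{
	printf("removed id %d\n", id);
}

static int insert_list(int *ids, int n, bool rollback)
{
	for (int i = 0; i < n; i++) {
		int rc = insert(i);
		if (rc < 0) {
			if (rollback) {
				for (int j = 0; j < i; j++) {
					if (ids[j] == ID_INVALID)
						continue;
					remove_id(ids[j]);
					ids[j] = ID_INVALID;
				}
				return rc;   /* caller falls back to promisc */
			}
			rc = ID_INVALID;     /* mark as missing, carry on */
		}
		ids[i] = rc;
	}
	return 0;
}

int main(void)
{
	int ids[5];

	printf("rc=%d\n", insert_list(ids, 5, true));
	return 0;
}
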
+static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
+ bool rollback)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec spec;
+ u8 baddr[ETH_ALEN];
+ int rc;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+
+ if (multicast)
+ efx_filter_set_mc_def(&spec);
+ else
+ efx_filter_set_uc_def(&spec);
+
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "%scast mismatch filter insert failed rc=%d\n",
+ multicast ? "Multi" : "Uni", rc);
+ } else if (multicast) {
+ table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ if (!nic_data->workaround_26807) {
+ /* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
EFX_FILTER_FLAG_RX_RSS,
0);
+ eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- table->dev_mc_list[i].addr);
+ baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- /* Fall back to multicast-promisc */
- while (i--)
- efx_ef10_filter_remove_safe(
- efx, EFX_FILTER_PRI_AUTO,
- table->dev_mc_list[i].id);
- table->dev_mc_count = -1;
- break;
+ netif_warn(efx, drv, efx->net_dev,
+ "Broadcast filter insert failed rc=%d\n",
+ rc);
+ if (rollback) {
+ /* Roll back the mc_def filter */
+ efx_ef10_filter_remove_unsafe(
+ efx, EFX_FILTER_PRI_AUTO,
+ table->mcdef_id);
+ table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ return rc;
+ }
+ } else {
+ table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
}
- table->dev_mc_list[i].id = rc;
- }
- }
- if (table->dev_mc_count < 0) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
- efx_filter_set_mc_def(&spec);
- rc = efx_ef10_filter_insert(efx, &spec, true);
- if (rc < 0) {
- WARN_ON(1);
- table->dev_mc_count = 0;
- } else {
- table->dev_mc_list[0].id = rc;
}
+ rc = 0;
+ } else {
+ table->ucdef_id = rc;
+ rc = 0;
}
+ return rc;
+}
+
+/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
+ * flag or removes these filters, we don't need to hold the filter_lock while
+ * scanning for these filters.
+ */
+static void efx_ef10_filter_remove_old(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ bool remove_failed = false;
+ int i;
- /* Remove filters that weren't renewed. Since nothing else
- * changes the AUTO_OLD flag or removes these filters, we
- * don't need to hold the filter_lock while scanning for
- * these filters.
- */
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (ACCESS_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -3917,6 +4141,87 @@ reset_nic:
return rc ? rc : rc2;
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ bool uc_promisc = false, mc_promisc = false;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_ef10_filter_mark_old(efx);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
+ efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (uc_promisc) {
+ efx_ef10_filter_insert_def(efx, false, false);
+ efx_ef10_filter_insert_addr_list(efx, false, false);
+ } else {
+ /* If any of the filters failed to insert, fall back to
+ * promiscuous mode - add in the uc_def filter. But keep
+ * our individual unicast filters.
+ */
+ if (efx_ef10_filter_insert_addr_list(efx, false, false))
+ efx_ef10_filter_insert_def(efx, false, false);
+ }
+
+ /* Insert/renew multicast filters */
+ /* If changing promiscuous state with cascaded multicast filters, remove
+ * old filters first, so that packets are dropped rather than duplicated
+ */
+ if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+ efx_ef10_filter_remove_old(efx);
+ if (mc_promisc) {
+ if (nic_data->workaround_26807) {
+ /* If we failed to insert promiscuous filters, rollback
+ * and fall back to individual multicast filters
+ */
+ if (efx_ef10_filter_insert_def(efx, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ efx_ef10_filter_remove_old(efx);
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ } else {
+ /* If we failed to insert promiscuous filters, don't
+ * rollback. Regardless, also insert the mc_list
+ */
+ efx_ef10_filter_insert_def(efx, true, false);
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ } else {
+ /* If any filters failed to insert, rollback and fall back to
+ * promiscuous mode - mc_def filter and maybe broadcast. If
+ * that fails, roll back again and insert as many of our
+ * individual multicast filters as we can.
+ */
+ if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+ /* Changing promisc state, so remove old filters */
+ if (nic_data->workaround_26807)
+ efx_ef10_filter_remove_old(efx);
+ if (efx_ef10_filter_insert_def(efx, true, true))
+ efx_ef10_filter_insert_addr_list(efx, true, false);
+ }
+ }
+
+ efx_ef10_filter_remove_old(efx);
+ efx->mc_promisc = mc_promisc;
+}
+
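A heavily condensed, runnable model of the fallback ladder in efx_ef10_filter_sync_rx_mode() above; every helper is a stub standing in for the corresponding efx_ef10_filter_* call, and the workaround-26807 remove-before-change ordering is elided for brevity:

#include <stdbool.h>
#include <stdio.h>

static int insert_list(const char *kind, bool ok)
{
	printf("insert %s list -> %s\n", kind, ok ? "ok" : "fail");
	return ok ? 0 : -1;
}

static void insert_def(const char *kind) { printf("insert %s mismatch filter\n", kind); }
static void mark_old(void)               { puts("mark filters old"); }
static void remove_old(void)             { puts("remove unrenewed filters"); }

static void sync_rx_mode(bool uc_promisc, bool mc_promisc, bool uc_ok, bool mc_ok)
{
	mark_old();

	if (uc_promisc) {
		insert_def("uni");
		insert_list("uni", uc_ok);   /* keep individual filters too */
	} else if (insert_list("uni", uc_ok)) {
		insert_def("uni");           /* fall back to promiscuous */
	}

	if (mc_promisc) {
		insert_def("multi");
		insert_list("multi", mc_ok);
	} else if (insert_list("multi", mc_ok)) {
		insert_def("multi");
	}

	remove_old();                        /* sweep what was not renewed */
}

int main(void)
{
	sync_rx_mode(false, false, false, true); /* uc list fails -> uc promisc */
	return 0;
}
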
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -4085,6 +4390,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
out:
+ if (rc == -EPERM)
+ rc = 0;
rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
return rc ? rc : rc2;
}
@@ -4371,6 +4678,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
.mcdi_read_response = efx_ef10_mcdi_read_response,
.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
.irq_enable_master = efx_port_dummy_op_void,
.irq_test_generate = efx_ef10_irq_test_generate,
.irq_disable_non_ev = efx_port_dummy_op_void,
@@ -4475,6 +4783,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.mcdi_poll_response = efx_ef10_mcdi_poll_response,
.mcdi_read_response = efx_ef10_mcdi_read_response,
.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
.irq_enable_master = efx_port_dummy_op_void,
.irq_test_generate = efx_ef10_irq_test_generate,
.irq_disable_non_ev = efx_port_dummy_op_void,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 03bc03b67f08..6e11ee6173ce 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -115,9 +115,9 @@ static struct workqueue_struct *reset_workqueue;
*
* This is only used in MSI-X interrupt mode
*/
-static bool separate_tx_channels;
-module_param(separate_tx_channels, bool, 0444);
-MODULE_PARM_DESC(separate_tx_channels,
+bool efx_separate_tx_channels;
+module_param(efx_separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX");
/* This is the weight assigned to each of the (per-channel) virtual
@@ -1391,7 +1391,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
unsigned int n_channels;
n_channels = efx_wanted_parallelism(efx);
- if (separate_tx_channels)
+ if (efx_separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
n_channels = min(n_channels, efx->max_channels);
@@ -1418,13 +1418,16 @@ static int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = n_channels;
if (n_channels > extra_channels)
n_channels -= extra_channels;
- if (separate_tx_channels) {
- efx->n_tx_channels = max(n_channels / 2, 1U);
+ if (efx_separate_tx_channels) {
+ efx->n_tx_channels = min(max(n_channels / 2,
+ 1U),
+ efx->max_tx_channels);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
} else {
- efx->n_tx_channels = n_channels;
+ efx->n_tx_channels = min(n_channels,
+ efx->max_tx_channels);
efx->n_rx_channels = n_channels;
}
for (i = 0; i < efx->n_channels; i++)
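
With efx_separate_tx_channels set, the channel budget is split roughly in half, each side clamped: TX gets at least one channel but no more than max_tx_channels, and RX gets the rest. The arithmetic in isolation (values are made up):

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int n_channels = 7, max_tx_channels = 2;
	unsigned int n_tx, n_rx;

	/* half the channels (at least 1), clamped to the TX limit */
	n_tx = min_u(max_u(n_channels / 2, 1u), max_tx_channels);
	n_rx = max_u(n_channels - n_tx, 1u);
	printf("tx=%u rx=%u\n", n_tx, n_rx);   /* tx=2 rx=5 */
	return 0;
}
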
@@ -1450,7 +1453,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
@@ -1624,7 +1627,8 @@ static void efx_set_channels(struct efx_nic *efx)
struct efx_tx_queue *tx_queue;
efx->tx_channel_offset =
- separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+ efx_separate_tx_channels ?
+ efx->n_channels - efx->n_tx_channels : 0;
/* We need to mark which channels really have RX and TX
* queues, and adjust the TX queue numbers if we have separate
@@ -1653,17 +1657,34 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc)
return rc;
- /* Determine the number of channels and queues by trying to hook
- * in MSI-X interrupts. */
- rc = efx_probe_interrupts(efx);
- if (rc)
- goto fail1;
+ do {
+ if (!efx->max_channels || !efx->max_tx_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "Insufficient resources to allocate"
+ " any channels\n");
+ rc = -ENOSPC;
+ goto fail1;
+ }
- efx_set_channels(efx);
+ /* Determine the number of channels and queues by trying
+ * to hook in MSI-X interrupts.
+ */
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail1;
- rc = efx->type->dimension_resources(efx);
- if (rc)
- goto fail2;
+ efx_set_channels(efx);
+
+ /* dimension_resources can fail with EAGAIN */
+ rc = efx->type->dimension_resources(efx);
+ if (rc != 0 && rc != -EAGAIN)
+ goto fail2;
+
+ if (rc == -EAGAIN)
+ /* try again with new max_channels */
+ efx_remove_interrupts(efx);
+
+ } while (rc == -EAGAIN);
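
The shape of the retry loop above, in isolation: the resource-dimensioning step may lower the channel budget and ask for another pass with -EAGAIN, and the loop gives up only when the budget reaches zero. All values below are made up:

#include <stdio.h>

static int dimension_resources(unsigned int *max_channels, unsigned int want)
{
	if (want > 4) {
		*max_channels = 4;   /* shrink the budget */
		return -11;          /* -EAGAIN */
	}
	return 0;
}

int main(void)
{
	unsigned int max_channels = 16, n_channels;
	int rc;

	do {
		if (!max_channels)
			return 1;                    /* nothing left: -ENOSPC */
		n_channels = max_channels;           /* "probe interrupts" */
		rc = dimension_resources(&max_channels, n_channels);
		if (rc && rc != -11)
			return 1;                    /* hard failure */
	} while (rc == -11);                         /* retry with new budget */

	printf("settled on %u channels\n", n_channels);
	return 0;
}
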
if (efx->n_channels > 1)
netdev_rss_key_fill(&efx->rx_hash_key,
@@ -2041,7 +2062,7 @@ static void efx_init_napi_channel(struct efx_channel *channel)
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
napi_hash_add(&channel->napi_str);
- efx_channel_init_lock(channel);
+ efx_channel_busy_poll_init(channel);
}
static void efx_init_napi(struct efx_nic *efx)
@@ -2104,7 +2125,7 @@ static int efx_busy_poll(struct napi_struct *napi)
if (!netif_running(efx->net_dev))
return LL_FLUSH_FAILED;
- if (!efx_channel_lock_poll(channel))
+ if (!efx_channel_try_lock_poll(channel))
return LL_FLUSH_BUSY;
old_rx_packets = channel->rx_queue.rx_packets;
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index acb1e0718485..1aaf76c1ace8 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -35,6 +35,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
+extern bool efx_separate_tx_channels;
/* RX */
void efx_set_default_rx_indir_table(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 80e69af21642..d790cb8d9db3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2371,6 +2371,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
EFX_MAX_CHANNELS);
+ efx->max_tx_channels = efx->max_channels;
efx->timer_quantum_ns = 4968; /* 621 cycles */
/* Initialise I2C adapter */
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f08266f0eca2..5a1c5a8f278a 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -321,7 +321,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count;
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+ tx_queue->xmit_more_available = false;
+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+ return;
do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81640f8bb811..41fb6b60a3f0 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/moduleparam.h>
-#include <asm/cmpxchg.h>
+#include <linux/atomic.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
@@ -1028,10 +1028,21 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
/* Consume the status word since efx_mcdi_rpc_finish() won't */
for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
- if (efx_mcdi_poll_reboot(efx))
+ rc = efx_mcdi_poll_reboot(efx);
+ if (rc)
break;
udelay(MCDI_STATUS_DELAY_US);
}
+
+ /* On EF10, a CODE_MC_REBOOT event can be received without the
+ * reboot detection in efx_mcdi_poll_reboot() being triggered.
+ * If zero was returned from the final call to
+ * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
+ * MC has definitely rebooted, so prepare for the reset.
+ */
+ if (!rc && efx->type->mcdi_reboot_detected)
+ efx->type->mcdi_reboot_detected(efx);
+
mcdi->new_epoch = true;
/* Nobody was waiting for an MCDI request, so trigger a reset */
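
The detection fallback described in the comment above, reduced to its control flow (poll_reboot() and the count are stand-ins): poll a bounded number of times, and if polling never reports the reboot that the event already announced, invoke the detection hook directly.

#include <stdio.h>

#define POLL_COUNT 4

static int poll_reboot(void)      { return 0; }   /* never notices */
static void reboot_detected(void) { puts("prepare for MC reset"); }

int main(void)
{
	int rc = 0;

	for (int i = 0; i < POLL_COUNT; i++) {
		rc = poll_reboot();
		if (rc)
			break;       /* reboot consumed via polling */
	}
	if (!rc)                     /* event said reboot; polling missed it */
		reboot_detected();
	return 0;
}
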
@@ -1779,15 +1790,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
return rc;
}
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+ unsigned int *flags)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
- return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (!flags)
+ return 0;
+
+ if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flags = 0;
+
+ return 0;
}
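
The optional-flags contract added above, modeled on its own: callers that pass NULL get the old behaviour, and a response shorter than the extended format simply yields zero flags. A sketch with illustrative sizes:

#include <stddef.h>
#include <stdio.h>

#define EXT_OUT_LEN 4u

static int set_workaround(size_t outlen, unsigned int outbuf_flags,
			  unsigned int *flags)
{
	if (!flags)
		return 0;            /* caller does not want the flags */
	*flags = outlen >= EXT_OUT_LEN ? outbuf_flags : 0;
	return 0;
}

int main(void)
{
	unsigned int flags;

	set_workaround(0, 0, &flags);   /* old firmware: plain OUT format */
	printf("flags=%u\n", flags);    /* 0 */
	set_workaround(4, 1, &flags);   /* EXT_OUT with FLR_DONE set */
	printf("flags=%u\n", flags);    /* 1 */
	return 0;
}
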
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
@@ -1816,7 +1843,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
return 0;
fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ /* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+ * terrifying. The call site will have to deal with it though.
+ */
+ netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
+ efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 1838afe2da92..025d504c472b 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+ unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 45fca9fc66b7..4cc772164a79 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -26,6 +26,10 @@
* Unlike a warm boot, assume DMEM has been reloaded, so that
* the MC persistent data must be reinitialised. */
#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
/* BIST state has been initialized */
#define MC_FW_BIST_INIT_OK (128)
@@ -169,6 +173,8 @@
#define MC_CMD_ERR_EINTR 4
/* I/O failure */
#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
/* Try again */
#define MC_CMD_ERR_EAGAIN 11
/* Out of memory */
@@ -181,6 +187,10 @@
#define MC_CMD_ERR_ENODEV 19
/* Invalid argument to target */
#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
/* Out of range */
#define MC_CMD_ERR_ERANGE 34
/* Non-recursive resource is already acquired */
@@ -226,6 +236,43 @@
#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
/* The datapath is disabled. */
#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+ only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps match it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such a case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
#define MC_CMD_ERR_CODE_OFST 0
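
A sketch of the proxy handshake the PROXY_PENDING comments describe: on this error the driver reads the 32-bit handle at the documented offset, waits for a PROXY_RESPONSE event carrying the same handle, then resends. The buffer layout follows the offsets in this header; everything else is a stand-in.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ERR_PROXY_PENDING         0x1010u
#define PROXY_PENDING_HANDLE_OFST 4

int main(void)
{
	uint8_t resp[8];                  /* error code word + handle word */
	uint32_t err = ERR_PROXY_PENDING;
	uint32_t mc_handle = 0xabcd;      /* handle chosen by the MC */
	uint32_t code, handle;

	memcpy(resp, &err, 4);
	memcpy(resp + PROXY_PENDING_HANDLE_OFST, &mc_handle, 4);

	memcpy(&code, resp, 4);
	if (code == ERR_PROXY_PENDING) {
		memcpy(&handle, resp + PROXY_PENDING_HANDLE_OFST, 4);
		printf("wait for PROXY_RESPONSE 0x%x, then resend\n", handle);
	}
	return 0;
}
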
@@ -275,6 +322,11 @@
MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
(n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
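Hypothetical use of the macro above, with a made-up port ID value: OR the stack ID into bits 16-23 of an EVB port ID.

#include <stdio.h>

#define EVB_STACK_ID(n) (((n) & 0xff) << 16)

int main(void)
{
	unsigned int port_id = 0x1;  /* stand-in for an EVB_PORT_ID_xxx value */
	unsigned int tagged = port_id | EVB_STACK_ID(3);

	printf("0x%x\n", tagged);    /* 0x30001: stack 3 in bits 16-23 */
	return 0;
}
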
/* Version 2 adds an optional argument to error returns: the errno value
* may be followed by the (0-based) number of the first argument that
@@ -394,6 +446,8 @@
#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
/* enum: DDR ECC status update */
#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -408,6 +462,16 @@
#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -416,6 +480,8 @@
#define MCDI_EVENT_EV_CODE_WIDTH 4
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
/* enum: Bad assert. */
#define MCDI_EVENT_CODE_BADSSERT 0x1
/* enum: PM Notice. */
@@ -470,6 +536,14 @@
#define MCDI_EVENT_CODE_MC_BIST 0x19
/* enum: PTP tick event providing current NIC time */
#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -537,6 +611,33 @@
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
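
The LBN/WIDTH pairs throughout this header describe bitfields: LBN is the least significant bit number within the 64-bit event and WIDTH is the field size in bits. A generic extractor, shown here pulling the PROXY_RESPONSE RC field out of a synthetic event:

#include <stdint.h>
#include <stdio.h>

#define PROXY_RESPONSE_RC_LBN   36
#define PROXY_RESPONSE_RC_WIDTH 8

static uint64_t ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
{
	return (ev >> lbn) & ((1ull << width) - 1);
}

int main(void)
{
	uint64_t ev = (uint64_t)13 << PROXY_RESPONSE_RC_LBN; /* RC = 13 */

	printf("rc=%llu\n",
	       (unsigned long long)ev_field(ev, PROXY_RESPONSE_RC_LBN,
					    PROXY_RESPONSE_RC_WIDTH));
	return 0;
}
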
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -581,6 +682,10 @@
#define FCDI_EVENT_CODE_PTP_TICK 0x7
/* enum: ECC error counters */
#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -594,11 +699,24 @@
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
@@ -631,6 +749,90 @@
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
/***********************************/
/* MC_CMD_READ32
@@ -687,24 +889,34 @@
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-/* enum: The main image should be entered via a copy of a single word from and
- * to this address when none of the other magic behaviours are required.
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
*/
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to start the datapath CPUs.
- * This is useful for certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
*/
#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to parse any configuration from
- * flash. (In addition, the datapath CPUs will not be started, as for
- * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
- * certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
*/
#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
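
Composing a boot "magic address" from the bit definitions above (purely illustrative; real boot flows may also use the deprecated magic constants):

#include <stdio.h>

#define BOOT_MAGIC_PRESENT_LBN                   17
#define BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN  2
#define BOOT_MAGIC_IGNORE_CONFIG_LBN              3

int main(void)
{
	unsigned int src_addr = (1u << BOOT_MAGIC_PRESENT_LBN) |
				(1u << BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN) |
				(1u << BOOT_MAGIC_IGNORE_CONFIG_LBN);

	printf("SRC_ADDR = 0x%x\n", src_addr);  /* 0x2000c */
	return 0;
}
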
@@ -795,6 +1007,10 @@
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
@@ -802,7 +1018,8 @@
/***********************************/
/* MC_CMD_LOG_CTRL
- * Configure the output stream for various events and messages.
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
*/
#define MC_CMD_LOG_CTRL 0x7
@@ -816,6 +1033,7 @@
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -955,8 +1173,12 @@
* input on the same NIC.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1b
+#define MC_CMD_PTP_OP_MAX 0x1c
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -1191,8 +1413,12 @@
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
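
Building the extended SUBSCRIBE queue word above: the event queue ID goes in bits 0-15, and REPORT_SYNC_STATUS in bit 31 requests sync-status reporting. A sketch with a made-up queue number:

#include <stdint.h>
#include <stdio.h>

#define QUEUE_ID_LBN           0
#define REPORT_SYNC_STATUS_LBN 31

int main(void)
{
	uint32_t queue = (5u << QUEUE_ID_LBN)            /* event queue 5 */
		       | (1u << REPORT_SYNC_STATUS_LBN); /* want sync status */

	printf("QUEUE word = 0x%08x\n", queue);          /* 0x80000005 */
	return 0;
}
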
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
@@ -1214,6 +1440,23 @@
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -1375,7 +1618,7 @@
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
/* Time format required/used by this NIC. Applies to all PTP MCDI
* operations that pass times between the host and firmware. If this operation
* is not supported (older firmware) a format of seconds and nanoseconds should
@@ -1396,6 +1639,13 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
@@ -1415,6 +1665,9 @@
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -1915,6 +2168,14 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
/* enum: Only this option is allowed for non-admin functions */
#define MC_CMD_FW_DONT_CARE 0xffffffff
@@ -2481,6 +2742,12 @@
#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
/* enum: Near side of AOE Siena side port */
#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2552,12 +2819,8 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
-/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
-/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
@@ -2632,7 +2895,7 @@
#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
/* MC_CMD_SET_MAC_IN msgrequest */
-#define MC_CMD_SET_MAC_IN_LEN 24
+#define MC_CMD_SET_MAC_IN_LEN 28
/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
* EtherII, VLAN, bug16011 padding).
*/
@@ -2649,13 +2912,20 @@
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
/* enum: Flow control is off. */
-/* MC_CMD_FCNTL_OFF 0x0 */
+#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
-/* MC_CMD_FCNTL_RESPOND 0x1 */
+#define MC_CMD_FCNTL_RESPOND 0x1
/* enum: Respond to and Issue flow control. */
-/* MC_CMD_FCNTL_BIDIR 0x2 */
+#define MC_CMD_FCNTL_BIDIR 0x2
/* enum: Auto neg flow control. */
#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -2748,7 +3018,8 @@
* guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
* performed, and the statistics may be read from the message response. If
* DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location).
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
*/
#define MC_CMD_MAC_STATS 0x2e
@@ -2791,6 +3062,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
@@ -2890,8 +3162,8 @@
* PM_AND_RXDP_COUNTERS capability only.
*/
#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
@@ -3213,6 +3485,8 @@
#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
/* enum: FC Log. */
#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
/***********************************/
@@ -3407,6 +3681,8 @@
*/
#define MC_CMD_SCHEDINFO 0x3e
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SCHEDINFO_IN msgrequest */
#define MC_CMD_SCHEDINFO_IN_LEN 0
@@ -3593,6 +3869,68 @@
#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
/* enum: Hotpoint temperature: degC */
#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to the QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to the QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -3701,6 +4039,8 @@
#define MC_CMD_SENSOR_STATE_BROKEN 0x3
/* enum: Sensor is working but does not currently have a reading. */
#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
@@ -3870,6 +4210,7 @@
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
@@ -3877,11 +4218,38 @@
#define MC_CMD_WORKAROUND_BUG35388 0x2
/* enum: Bug35017 workaround (A64 tables must be identity map) */
#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
@@ -4093,7 +4461,7 @@
/***********************************/
/* MC_CMD_GET_MAC_ADDRESSES
- * Returns the base MAC, count and stride for the requestiong function
+ * Returns the base MAC, count and stride for the requesting function
*/
#define MC_CMD_GET_MAC_ADDRESSES 0x55
@@ -4115,6 +4483,527 @@
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of (device connected to MUM) to read from registers of */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of (device connected to MUM) to write to registers of */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
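The LENMIN/LENMAX/LEN(num) triple above describes a variable-length request: 12 fixed bytes plus 4 bytes per data word, for 1 to 60 words. Checking the arithmetic:

#include <stdio.h>

#define MUM_IN_WRITE_LEN(num) (12 + 4 * (num))

int main(void)
{
	printf("1 word   -> %d bytes\n", MUM_IN_WRITE_LEN(1));  /* 16 = LENMIN */
	printf("60 words -> %d bytes\n", MUM_IN_WRITE_LEN(60)); /* 252 = LENMAX */
	return 0;
}
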
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
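
Each 32-bit DATA element packs READING, STATE and TYPE, described by the _LBN (least-significant bit number) and _WIDTH pairs above. A hedged sketch of unpacking one element (helper names are illustrative; the macros are assumed in scope):

#include <stdint.h>

/* Illustrative sketch: extract a field described by an LBN/WIDTH pair. */
static uint32_t mcdi_field(uint32_t word, unsigned int lbn, unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1);
}

/* Split one READ_SENSORS DATA word into its three sub-fields. */
static void decode_sensor_word(uint32_t data, uint16_t *reading,
			       uint8_t *state, uint8_t *type)
{
	*reading = mcdi_field(data, MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN,
			      MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH);
	*state = mcdi_field(data, MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN,
			    MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH);
	*type = mcdi_field(data, MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN,
			   MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH);
}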
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
/* MC_CMD_RESOURCE_SPECIFIER enum */
/* enum: Any */
#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -4203,6 +5092,30 @@
#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -4218,66 +5131,69 @@
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
/* enum: OpenOnload */
-#define LICENSED_APP_ID_ONLOAD 0x1
+#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
-#define LICENSED_APP_ID_PTP 0x2
+#define LICENSED_APP_ID_PTP 0x2
/* enum: SolarCapture Pro */
-#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-
-/***********************************/
-/* MC_CMD_GET_WORKAROUNDS
- * Read the list of all implemented and all currently enabled workarounds. The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
- */
-#define MC_CMD_GET_WORKAROUNDS 0x59
-
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
- */
-#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a
+ * TX timestamp.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
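
A full TX timestamp therefore arrives as two events carrying 16 data bits each, distinguished from ordinary completions by the TX_EV_TYPE byte. A minimal sketch of reassembly, assuming the driver has already paired a TSTAMP_LO event with its following TSTAMP_HI event (helper name illustrative):

#include <stdint.h>

/* Illustrative sketch: combine the two 16-bit timestamp halves into one
 * 32-bit value (HI in the upper half, LO in the lower half).
 */
static uint32_t tx_timestamp_combine(uint16_t lo_data, uint16_t hi_data)
{
	return ((uint32_t)hi_data << 16) | lo_data;
}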
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
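
Since the selector is the OR of four single-bit fields, a conventional 4-tuple hash sets all of them. A minimal sketch under that reading (helper name illustrative):

#include <stdint.h>

/* Illustrative sketch: build the 4-bit RSS hash selector that includes
 * source/destination address and source/destination port in the hash.
 * A selector of 0 disables RSS spreading for the packet type.
 */
static uint8_t rss_mode_4tuple(void)
{
	return (1u << RSS_MODE_HASH_SRC_ADDR_LBN) |
	       (1u << RSS_MODE_HASH_DST_ADDR_LBN) |
	       (1u << RSS_MODE_HASH_SRC_PORT_LBN) |
	       (1u << RSS_MODE_HASH_DST_PORT_LBN);
}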
/***********************************/
@@ -4413,7 +5329,9 @@
#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_INIT_RXQ_IN msgrequest */
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy INIT_RXQ request. Use extended version
+ * in new code.
+ */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
@@ -4456,9 +5374,73 @@
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4k-aligned, 4k-sized host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
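
FLAGS at offset 16 packs several independent bitfields, including the 4-bit DMA_MODE and 3-bit PACKED_STREAM_BUFF_SIZE enums. A hedged sketch of composing a packed-stream configuration from the _LBN macros above (helper name illustrative):

#include <stdint.h>

/* Illustrative sketch: FLAGS word selecting packed-stream DMA mode with
 * 256K buffers. Each enum value is shifted to its field's LBN position.
 */
static uint32_t init_rxq_ext_flags_packed_stream(void)
{
	return ((uint32_t)MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM <<
		MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN) |
	       ((uint32_t)MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K <<
		MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN);
}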
+
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
/***********************************/
/* MC_CMD_INIT_TXQ
@@ -4467,7 +5449,9 @@
#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_INIT_TXQ_IN msgrequest */
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
@@ -4499,6 +5483,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4511,6 +5499,60 @@
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4k-aligned, 4k-sized host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
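
QBB_FLAGS at offset 540 holds a 1-bit enable and a 3-bit priority. A minimal sketch of packing it (helper name illustrative; the macros are assumed in scope):

#include <stdint.h>

/* Illustrative sketch: Qbb flow-control flags word. PRIORITY is a 3-bit
 * field, so only values 0-7 are representable.
 */
static uint32_t init_txq_ext_qbb_flags(int enable, unsigned int priority)
{
	return ((enable ? 1u : 0u) << MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN) |
	       ((priority & 0x7u) << MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN);
}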
+
/* MC_CMD_INIT_TXQ_OUT msgresponse */
#define MC_CMD_INIT_TXQ_OUT_LEN 0
@@ -4617,6 +5659,132 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
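
For reference, the 16-byte layout above maps onto a packed struct; a hedged sketch (the struct name is illustrative, and the fields are assumed little-endian per the usual MCDI convention, so a big-endian host would need byte swapping):

#include <stdint.h>

/* Illustrative sketch: host-side view of one MC_PROXY_STATUS_BUFFER. */
struct mc_proxy_status_buffer {
	uint32_t handle;             /* OFST 0: 0x0 = HANDLE_INVALID */
	uint16_t pf;                 /* OFST 4: requesting PF number */
	uint16_t vf;                 /* OFST 6: requesting VF, or VF_NULL */
	uint16_t rid;                /* OFST 8: target function RID */
	uint16_t status;             /* OFST 10: see MC_CMD_PROXY_COMPLETE */
	uint32_t granted_privileges; /* OFST 12: elevated privilege mask */
} __attribute__((packed));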
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
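
The STATUS word at offset 4 selects one of four outcomes. A sketch of a driver-side dispatch over those values (function name illustrative; the enums come from the definitions above):

/* Illustrative sketch: describe a PROXY_COMPLETE status code. */
static const char *proxy_status_name(unsigned int status)
{
	switch (status) {
	case MC_CMD_PROXY_COMPLETE_IN_COMPLETE:
		return "completed; reply is in the REPLY_BUFF";
	case MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED:
		return "authorized; originator may retry";
	case MC_CMD_PROXY_COMPLETE_IN_DECLINED:
		return "declined";
	case MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT:
		return "authorization timed out";
	default:
		return "unknown status";
	}
}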
+
/***********************************/
/* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4688,6 +5856,44 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+/* PORT_CONFIG_ENTRY structuredef */
+#define PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define PORT_CONFIG_ENTRY_CORE_OFST 1
+#define PORT_CONFIG_ENTRY_CORE_LEN 1
+#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
+#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
+#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
+#define PORT_CONFIG_ENTRY_CORE_LBN 8
+#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define PORT_CONFIG_ENTRY_LANES_OFST 4
+#define PORT_CONFIG_ENTRY_LANES_LBN 32
+#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
/***********************************/
/* MC_CMD_FILTER_OP
@@ -4759,9 +5965,9 @@
#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
-/* enum: loop back to port 0 TX MAC */
+/* enum: loop back to TXDP 0 */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
-/* enum: loop back to port 1 TX MAC */
+/* enum: loop back to TXDP 1 */
#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
@@ -4778,9 +5984,7 @@
#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
* RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
- * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
- * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
- * a valid handle.
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
/* transmit domain (reserved; set to 0) */
@@ -4835,6 +6039,235 @@
#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
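
Within this request, the VNI_OR_VSID word at offset 72 puts the 24-bit VNI (or VSID) in the low bits and the type selector in the top byte. A minimal sketch of packing it via the bitfield view (helper name illustrative; any host/network byte-order conversion is omitted):

#include <stdint.h>

/* Illustrative sketch: pack a 24-bit VNI/VSID with its type byte. For
 * example, pass MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE for Geneve.
 */
static uint32_t filter_vni_or_vsid(uint32_t value, uint8_t type)
{
	uint32_t mask = (1u << MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH) - 1;

	return (value & mask) |
	       ((uint32_t)type << MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN);
}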
+
/* MC_CMD_FILTER_OP_OUT msgresponse */
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
@@ -4849,6 +6282,27 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
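
Because 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle, it makes a safe sentinel for uninitialised or failed insertions; a minimal sketch (helper name illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch: test a returned filter handle against the
 * guaranteed-invalid value defined above.
 */
static bool filter_handle_valid(uint32_t lo, uint32_t hi)
{
	return !(lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
		 hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);
}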
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
/***********************************/
@@ -4865,6 +6319,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
/* enum: read the list of supported RX filter matches */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -4884,6 +6342,17 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
/***********************************/
/* MC_CMD_PARSER_DISP_RW
@@ -4901,8 +6370,10 @@
#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine */
+/* enum: Lookup engine (with original metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
/* enum: read a word of DICPU DMEM or a LUE entry */
@@ -4919,6 +6390,8 @@
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
@@ -5019,7 +6492,9 @@
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
-/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC response.
+ * Use extended version in new code.
+ */
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
@@ -5028,6 +6503,17 @@
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
/***********************************/
/* MC_CMD_FREE_VIS
@@ -5114,13 +6600,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
/***********************************/
@@ -5575,6 +7063,7 @@
#define MC_CMD_GET_CAPABILITIES 0xbe
#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
@@ -5582,6 +7071,20 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
@@ -5600,8 +7103,14 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5609,6 +7118,10 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
@@ -5632,6 +7145,10 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
@@ -5642,22 +7159,69 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
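For illustration, the REV and TYPE subfields above are plain LBN/WIDTH bitfields within the 16-bit PD firmware version word. A minimal C sketch of unpacking the type (the `rxpd_ver` value is assumed to have been read from the GET_CAPABILITIES response at RXPD_FW_VERSION_OFST; the helper name is hypothetical):

/* Extract the 4-bit datapath firmware type from the version word using
 * the LBN/WIDTH constants defined above.
 */
static unsigned int rxpd_fw_type(unsigned int rxpd_ver)
{
        return (rxpd_ver >> MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN) &
               ((1u << MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH) - 1);
}

A result equal to, say, MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY then identifies the low latency RX datapath build.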
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
/* Licensed capabilities */
@@ -5735,6 +7299,15 @@
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -5753,8 +7326,14 @@
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq is added to when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
/* an already reserved bucket (typically set to bucket associated with outer
@@ -5768,6 +7347,35 @@
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is added to when it is inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
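As a sketch of how the single-bit PQ_FLAG_* subfields compose into the PQ_FLAGS word (the flag combination chosen here is illustrative, not a recommended configuration):

/* Insert the txq into the GUARANTEED and NORMAL priority queues by
 * setting the corresponding one-bit subfields of PQ_FLAGS.
 */
unsigned int pq_flags =
        (1u << MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN) |
        (1u << MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN);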
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -5826,13 +7434,23 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
@@ -5892,7 +7510,10 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. */
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
@@ -6136,8 +7757,13 @@
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
-/* The handle of the new RSS context */
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
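A minimal allocation sketch, assuming the sfc driver's MCDI helpers (MCDI_DECLARE_BUF, MCDI_DWORD and efx_mcdi_rpc from drivers/net/ethernet/sfc/mcdi.h) and a hypothetical `efx` NIC pointer; it checks the returned handle against the guaranteed-invalid sentinel and otherwise treats it as opaque:

MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
size_t outlen;
u32 ctx;
int rc;

/* IN fields (UPSTREAM_PORT_ID, TYPE, NUM_QUEUES) omitted for brevity */
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
                  outbuf, sizeof(outbuf), &outlen);
if (rc == 0) {
        ctx = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
        if (ctx == MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID)
                rc = -EIO;      /* allocation refused */
}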
/***********************************/
@@ -6249,7 +7875,11 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags */
+/* Hash control flags. The _EN bits are always supported. The _MODE bits only
+ * work when the firmware reports ADDITIONAL_RSS_MODES in
+ * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
+ * See the RSS_MODE structure for the meaning of the mode bits.
+ */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6259,6 +7889,20 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
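To show how the legacy _EN bits and the new 4-bit _MODE subfields share the FLAGS word, a hedged sketch; it leaves every _MODE nibble at zero so the _EN semantics apply on all firmware (the concrete non-zero mode encodings live in the RSS_MODE structure and are not reproduced here):

unsigned int flags = 0;

/* Legacy behaviour: Toeplitz hashing for IPv4 and TCP/IPv6 via the _EN
 * bits; all _MODE subfields stay zero so they override nothing.
 */
flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN;
flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN;

/* On firmware reporting ADDITIONAL_RSS_MODES, a non-zero nibble such as
 *   flags |= (mode & 0xf) << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
 * would instead override the _EN bits for that traffic class.
 */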
/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
@@ -6279,7 +7923,12 @@
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags */
+/* Hash control flags. If any _MODE bits are non-zero (which will only be true
+ * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
+ * disregarded (but are guaranteed to be consistent with the _MODE bits if
+ * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
+ * allocated).
+ */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6289,6 +7938,20 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
/***********************************/
@@ -6311,8 +7974,13 @@
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping */
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
/***********************************/
@@ -6421,375 +8089,6 @@
/***********************************/
-/* MC_CMD_RMON_RX_CLASS_STATS
- * Retrieve rmon rx class statistics
- */
-#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
-
-/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_CLASS_STATS
- * Retrieve rmon tx class statistics
- */
-#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
-
-/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
- * Retrieve rmon rx super_class statistics
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
- * Retrieve rmon tx super_class statistics
- */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_CLASS
- * Allocate an rmon class
- */
-#define MC_CMD_RMON_ALLOC_CLASS 0xca
-
-/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
-#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
-/* class */
-#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_CLASS
- * Deallocate an rmon class
- */
-#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
-
-/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
-#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
-/* class */
-#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS
- * Allocate an rmon super_class
- */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
-/* super_class */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
- * Deallocate an rmon tx super_class
- */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
-/* super_class */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_UP_CONV_STATS
- * Retrieve up converter statistics
- */
-#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPI_STATS
- * Retrieve rx ipi stats
- */
-#define MC_CMD_RMON_RX_IPI_STATS 0xcf
-
-/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
-#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
-#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
- * Retrieve rx ipsec cntxt_ptr indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
- * Retrieve rx ipsec port indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -6877,7 +8176,7 @@
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
@@ -6921,354 +8220,6 @@
/***********************************/
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
- * Retrieve rx class drop stats
- */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
- * Retrieve rx super class drop stats
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
-#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPI_STATS
- * Retrieve tx ipi stats
- */
-#define MC_CMD_RMON_TX_IPI_STATS 0xd7
-
-/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
-#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
-#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
- * Retrieve tx ipsec counters by cntxt_ptr
- */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
- * Retrieve tx ipsec counters by port
- */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_STATS
- * Retrieve tx nowhere stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
- * Retrieve tx nowhere qbb stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
-#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
-/* class */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
-/* super_class */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
 * Return the system and DPCPU clock frequencies.
*/
@@ -7296,22 +8247,66 @@
#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 12
-/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 12
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
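A hedged request-building sketch for the widened command, assuming the sfc MCDI helpers and using the DONT_CHANGE sentinel (0) for every domain except the system clock; the 600 MHz figure is purely illustrative:

MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLOCK_IN_LEN);

MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_SYS_FREQ, 600);      /* illustrative */
MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_ICORE_FREQ,
               MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE);
MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_DPCPU_FREQ,
               MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE);
MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_PCS_FREQ,
               MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE);
MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_MC_FREQ,
               MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE);
MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_RMON_FREQ,
               MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE);
MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_VSWITCH_FREQ,
               MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE);

In the response, a frequency of 0 (the *_DOMAIN_UNSUPPORTED value) indicates that the corresponding domain does not exist or is not controllable.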
/***********************************/
@@ -7325,12 +8320,22 @@
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-/* enum: RxDPCPU */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
/* enum: TxDPCPU1 */
#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
* initialised to zero
*/
@@ -7418,6 +8423,25 @@
/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_CAP_BLK_READ
* Read multiple 64bit words from capture block memory
*/
@@ -7730,6 +8754,8 @@
* more data is returned.
*/
#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7762,20 +8788,32 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
+/* enum: Attenuation (0-15, TBD for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
+/* enum: CTLE Boost (0-15, TBD for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
+ * for Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (TBD for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -7865,6 +8903,8 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
/* enum: TX Slew Rate Fine control */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -7955,6 +8995,20 @@
#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
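A raw-buffer sketch of the new READ_FOM sub-operation (no driver helpers assumed; `lane` is a hypothetical lane index, and the dword fields are little-endian per MCDI convention):

u8 inbuf[MC_CMD_KR_TUNE_READ_FOM_IN_LEN] = { 0 };
__le32 lane_le = cpu_to_le32(lane);

inbuf[MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST] =
        MC_CMD_KR_TUNE_IN_READ_FOM;     /* sub-op 0x7; RSVD bytes stay 0 */
memcpy(inbuf + MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST, &lane_le, 4);

/* The response carries one dword: the figure of merit at FOM_OFST,
 * where a higher value indicates a better eye.
 */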
/***********************************/
/* MC_CMD_PCIE_TUNE
@@ -8224,6 +9278,8 @@
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
/* enum: validate application */
#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
/* arguments specific to this particular operation */
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -8258,10 +9314,22 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
/***********************************/
/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure port sniffing for the physical port associated with the calling
+ * Configure RX port sniffing for the physical port associated with the calling
* function. Only a privileged function may change the port sniffing
* configuration. A copy of all traffic delivered to the host (non-promiscuous
* mode) or all traffic arriving at the port (promiscuous mode) may be
@@ -8299,7 +9367,7 @@
/***********************************/
/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current port sniffing configuration for the physical port
+ * Obtain the current RX port sniffing configuration for the physical port
* associated with the calling function. Only a privileged function may read
* the configuration.
*/
@@ -8330,4 +9398,673 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
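As a sketch of the TYPE/ENTITY/VALUE pattern, assuming the sfc MCDI helpers and a hypothetical `txq_handle`; this enables multicast UDP destination lookup for one TXQ with a single boolean VALUE dword:

MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));

MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE,
               MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, txq_handle);
MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, 1);      /* enable */

The companion MC_CMD_GET_PARSER_DISP_CONFIG below reads the same TYPE/ENTITY pair back.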
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
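A hedged sketch mirroring all transmitted traffic to one queue (RX_MODE_SIMPLE, no RSS), assuming the sfc MCDI helpers and a hypothetical, pre-created `sniff_rxq` handle:

MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN);

MCDI_POPULATE_DWORD_1(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS,
                      SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE, 1);
MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE, sniff_rxq);
MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE,
               MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT,
               0xffffffff);     /* no RSS context in SIMPLE mode */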
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
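
/* Editor's note -- illustrative sketch, not part of this patch: since MODES
 * is a plain bitmask indexed by TLV_PORT_MODE_*, availability of a given mode
 * is a single shift-and-mask on the returned dword. Hypothetical helper.
 */
static bool example_port_mode_available(struct efx_nic *efx, unsigned int mode)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_MODES_OUT_LEN);
	size_t outlen;

	if (efx_mcdi_rpc(efx, MC_CMD_GET_PORT_MODES, NULL, 0,
			 outbuf, sizeof(outbuf), &outlen) ||
	    outlen < MC_CMD_GET_PORT_MODES_OUT_LEN)
		return false;

	return MCDI_DWORD(outbuf, GET_PORT_MODES_OUT_MODES) & BIT(mode);
}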
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 workaround. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 workaround (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
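
/* Editor's note -- illustrative sketch, not part of this patch: both output
 * words are bitmasks indexed by the enums above, so a caller can test e.g.
 * whether the bug 35388 workaround is currently enabled. Hypothetical helper
 * built on the sfc MCDI macros.
 */
static int example_get_workarounds(struct efx_nic *efx, bool *bug35388)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
		return -EIO;

	*bug35388 = !!(MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED) &
		       MC_CMD_GET_WORKAROUNDS_OUT_BUG35388);
	return 0;
}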
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000,
+ * VF 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
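
/* Editor's note -- illustrative sketch, not part of this patch: a pure read
 * of a function's mask. The PF/VF sub-fields are packed with
 * MCDI_POPULATE_DWORD_2(); leaving DO_CHANGE clear in NEW_MASK makes the
 * command read-only. Hypothetical helper.
 */
static int example_get_privilege_mask(struct efx_nic *efx, unsigned int pf,
				      unsigned int vf, u32 *mask)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_POPULATE_DWORD_2(inbuf, PRIVILEGE_MASK_IN_FUNCTION,
			      PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
			      PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK, 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
		return -EIO;

	*mask = MCDI_DWORD(outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
	return 0;
}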
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; must be a VF,
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
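
/* Editor's note -- illustrative sketch, not part of this patch: reading a
 * VF's link state mode by passing DO_NOT_CHANGE as the new mode, mirroring
 * the read-without-write convention used elsewhere in this header.
 * Hypothetical helper.
 */
static int example_get_vf_link_state(struct efx_nic *efx, unsigned int pf,
				     unsigned int vf, u32 *mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
			      LINK_STATE_MODE_IN_FUNCTION_PF, pf,
			      LINK_STATE_MODE_IN_FUNCTION_VF, vf);
	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
		       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);

	rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
		return -EIO;

	*mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
	return 0;
}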
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
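
/* Editor's note -- illustrative sketch, not part of this patch: granting one
 * privilege group (defined under MC_CMD_PRIVILEGE_MASK above) to every VF of
 * a PF. Hypothetical helper; the command has no output payload.
 */
static int example_grant_vf_mac_spoofing(struct efx_nic *efx, unsigned int pf)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MODIFY_IN_LEN);

	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FN_GROUP,
		       MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF);
	MCDI_POPULATE_DWORD_1(inbuf, PRIVILEGE_MODIFY_IN_FUNCTION,
			      PRIVILEGE_MODIFY_IN_FUNCTION_PF, pf);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_ADD_MASK,
		       MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_REMOVE_MASK, 0);

	return efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MODIFY, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}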
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
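
/* Editor's note -- illustrative sketch, not part of this patch: handling a
 * variable-length response. LENMAX caps one read at 252 bytes, so longer
 * reads are chunked; MCDI_PTR() locates the byte array in the response.
 * Hypothetical helper.
 */
static int example_xpm_read(struct efx_nic *efx, u32 addr, u8 *data,
			    size_t len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_READ_BYTES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
	size_t chunk, outlen;
	int rc;

	while (len) {
		chunk = min_t(size_t, len, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
		MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_ADDR, addr);
		MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_COUNT, chunk);
		rc = efx_mcdi_rpc(efx, MC_CMD_XPM_READ_BYTES,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < chunk)
			return -EIO;
		memcpy(data, MCDI_PTR(outbuf, XPM_READ_BYTES_OUT_DATA), chunk);
		addr += chunk;
		data += chunk;
		len -= chunk;
	}
	return 0;
}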
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space is available). If 0, only one write attempt
+ * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be fewer than BAD_COUNT if they cannot all
+ * fit into the MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
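
/* Editor's note -- illustrative sketch, not part of this patch: walking the
 * variable-length list of 16-bit bad addresses. The number of addresses
 * actually returned is derived from the response length and may be smaller
 * than BAD_COUNT; MCDI payloads are little-endian, hence the __le16 view.
 * Hypothetical helper.
 */
static int example_xpm_blank_check(struct efx_nic *efx, u32 addr, u32 count)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_BLANK_CHECK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX);
	const __le16 *bad_addr;
	size_t outlen, reported, i;
	int rc;

	MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_ADDR, addr);
	MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_COUNT, count);
	rc = efx_mcdi_rpc(efx, MC_CMD_XPM_BLANK_CHECK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
		return -EIO;

	reported = (outlen - MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN) /
		   MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN;
	bad_addr = (const __le16 *)MCDI_PTR(outbuf,
					    XPM_BLANK_CHECK_OUT_BAD_ADDR);
	for (i = 0; i < reported; i++)
		netif_dbg(efx, hw, efx->net_dev, "bad XPM location %#x\n",
			  le16_to_cpu(bad_addr[i]));
	return MCDI_DWORD(outbuf, XPM_BLANK_CHECK_OUT_BAD_COUNT) ? -EIO : 0;
}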
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none are left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 47d1e3a96522..a8ddd122f685 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -219,6 +219,7 @@ struct efx_tx_buffer {
* @tso_packets: Number of packets via the TSO xmit path
* @pushes: Number of times the TX push feature has been used
* @pio_packets: Number of times the TX PIO feature has been used
+ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -253,6 +254,7 @@ struct efx_tx_queue {
unsigned int tso_packets;
unsigned int pushes;
unsigned int pio_packets;
+ bool xmit_more_available;
/* Statistics to supplement MAC stats */
unsigned long tx_packets;
@@ -431,21 +433,8 @@ struct efx_channel {
struct net_device *napi_dev;
struct napi_struct napi_str;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
- spinlock_t state_lock;
-#define EFX_CHANNEL_STATE_IDLE 0
-#define EFX_CHANNEL_STATE_NAPI (1 << 0) /* NAPI owns this channel */
-#define EFX_CHANNEL_STATE_POLL (1 << 1) /* poll owns this channel */
-#define EFX_CHANNEL_STATE_DISABLED (1 << 2) /* channel is disabled */
-#define EFX_CHANNEL_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this channel */
-#define EFX_CHANNEL_STATE_POLL_YIELD (1 << 4) /* poll yielded this channel */
-#define EFX_CHANNEL_OWNED \
- (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
-#define EFX_CHANNEL_LOCKED \
- (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
-#define EFX_CHANNEL_USER_PEND \
- (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ unsigned long busy_poll_state;
+#endif
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
@@ -480,98 +469,94 @@ struct efx_channel {
};
#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+enum efx_channel_busy_poll_state {
+ EFX_CHANNEL_STATE_IDLE = 0,
+ EFX_CHANNEL_STATE_NAPI = BIT(0),
+ EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
+ EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
+ EFX_CHANNEL_STATE_POLL_BIT = 2,
+ EFX_CHANNEL_STATE_POLL = BIT(2),
+ EFX_CHANNEL_STATE_DISABLE_BIT = 3,
+};
+
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
- spin_lock_init(&channel->state_lock);
+ WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}
/* Called from the device poll routine to get ownership of a channel. */
static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if (channel->state & EFX_CHANNEL_LOCKED) {
- WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
- channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
- rc = false;
- } else {
- /* we don't care if someone yielded */
- channel->state = EFX_CHANNEL_STATE_NAPI;
+ unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
+
+ while (1) {
+ switch (old) {
+ case EFX_CHANNEL_STATE_POLL:
+			/* Ensure efx_channel_try_lock_poll() won't starve us */
+ set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
+ &channel->busy_poll_state);
+ /* fallthrough */
+ case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
+ return false;
+ default:
+ break;
+ }
+ prev = cmpxchg(&channel->busy_poll_state, old,
+ EFX_CHANNEL_STATE_NAPI);
+ if (unlikely(prev != old)) {
+ /* This is likely to mean we've just entered polling
+ * state. Go back round to set the REQ bit.
+ */
+ old = prev;
+ continue;
+ }
+ return true;
}
- spin_unlock_bh(&channel->state_lock);
- return rc;
}
static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- WARN_ON(channel->state &
- (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
-
- channel->state &= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
+ /* Make sure write has completed from efx_channel_lock_napi() */
+ smp_wmb();
+ WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}
/* Called from efx_busy_poll(). */
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if ((channel->state & EFX_CHANNEL_LOCKED)) {
- channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
- rc = false;
- } else {
- /* preserve yield marks */
- channel->state |= EFX_CHANNEL_STATE_POLL;
- }
- spin_unlock_bh(&channel->state_lock);
- return rc;
+ return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
+ EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
}
-/* Returns true if NAPI tried to get the channel while it was locked. */
static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-
- /* will reset state to idle, unless channel is disabled */
- channel->state &= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
+ clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}
-/* True if a socket is polling, even if it did not get the lock. */
static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
- WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
- return channel->state & EFX_CHANNEL_USER_PEND;
+ return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}
static inline void efx_channel_enable(struct efx_channel *channel)
{
- spin_lock_bh(&channel->state_lock);
- channel->state = EFX_CHANNEL_STATE_IDLE;
- spin_unlock_bh(&channel->state_lock);
+ clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
+ &channel->busy_poll_state);
}
-/* False if the channel is currently owned. */
+/* Stop any further busy polling or NAPI access to this channel.
+ * Returns false if the channel is currently busy polling.
+ */
static inline bool efx_channel_disable(struct efx_channel *channel)
{
- bool rc = true;
-
- spin_lock_bh(&channel->state_lock);
- if (channel->state & EFX_CHANNEL_OWNED)
- rc = false;
- channel->state |= EFX_CHANNEL_STATE_DISABLED;
- spin_unlock_bh(&channel->state_lock);
-
- return rc;
+ set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+ /* Implicit barrier in efx_channel_busy_polling() */
+ return !efx_channel_busy_polling(channel);
}
#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
}
@@ -584,7 +569,7 @@ static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
}
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
return false;
}
@@ -925,6 +910,7 @@ struct vfdi_status;
* @stats_lock: Statistics update lock. Must be held when calling
* efx_nic_type::{update,start,stop}_stats.
* @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ * @mc_promisc: Whether in multicast promiscuous mode when last changed
*
* This is stored in the private area of the &struct net_device.
*/
@@ -971,6 +957,7 @@ struct efx_nic {
unsigned next_buffer_table;
unsigned int max_channels;
+ unsigned int max_tx_channels;
unsigned n_channels;
unsigned n_rx_channels;
unsigned rss_spread;
@@ -1072,6 +1059,7 @@ struct efx_nic {
int last_irq_cpu;
spinlock_t stats_lock;
atomic_t n_rx_noskb_drops;
+ bool mc_promisc;
};
static inline int efx_dev_registered(struct efx_nic *efx)
@@ -1274,6 +1262,7 @@ struct efx_nic_type {
void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
size_t pdu_offset, size_t pdu_len);
int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*mcdi_reboot_detected)(struct efx_nic *efx);
void (*irq_enable_master)(struct efx_nic *efx);
void (*irq_test_generate)(struct efx_nic *efx);
void (*irq_disable_non_ev)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 31ff9084d9a4..0b536e27d3b2 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -506,6 +506,7 @@ enum {
* @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
@@ -535,6 +536,7 @@ struct efx_ef10_nic_data {
bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
+ bool workaround_26807;
bool must_check_datapath_caps;
u32 datapath_caps;
unsigned int rx_dpcpu_fw_id;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index ad62615a93dc..c771e0af4e06 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -401,8 +401,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
/* For Siena platforms NIC time is s and ns */
static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
{
- struct timespec ts = ns_to_timespec(ns);
- *nic_major = ts.tv_sec;
+ struct timespec64 ts = ns_to_timespec64(ns);
+ *nic_major = (u32)ts.tv_sec;
*nic_minor = ts.tv_nsec;
}
@@ -431,8 +431,8 @@ static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
*/
static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
{
- struct timespec ts = ns_to_timespec(ns);
- u32 maj = ts.tv_sec;
+ struct timespec64 ts = ns_to_timespec64(ns);
+ u32 maj = (u32)ts.tv_sec;
u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
(1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
@@ -646,28 +646,28 @@ static void efx_ptp_send_times(struct efx_nic *efx,
struct pps_event_time *last_time)
{
struct pps_event_time now;
- struct timespec limit;
+ struct timespec64 limit;
struct efx_ptp_data *ptp = efx->ptp_data;
- struct timespec start;
+ struct timespec64 start;
int *mc_running = ptp->start.addr;
pps_get_ts(&now);
start = now.ts_real;
limit = now.ts_real;
- timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+ timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
/* Write host time for specified period or until MC is done */
- while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+ while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
ACCESS_ONCE(*mc_running)) {
- struct timespec update_time;
+ struct timespec64 update_time;
unsigned int host_time;
/* Don't update continuously to avoid saturating the PCIe bus */
update_time = now.ts_real;
- timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+ timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
do {
pps_get_ts(&now);
- } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+ } while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
ACCESS_ONCE(*mc_running));
/* Synchronise NIC with single word of time only */
@@ -723,7 +723,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
struct efx_ptp_data *ptp = efx->ptp_data;
u32 last_sec;
u32 start_sec;
- struct timespec delta;
+ struct timespec64 delta;
ktime_t mc_time;
if (number_readings == 0)
@@ -737,14 +737,14 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
*/
for (i = 0; i < number_readings; i++) {
s32 window, corrected;
- struct timespec wait;
+ struct timespec64 wait;
efx_ptp_read_timeset(
MCDI_ARRAY_STRUCT_PTR(synch_buf,
PTP_OUT_SYNCHRONIZE_TIMESET, i),
&ptp->timeset[i]);
- wait = ktime_to_timespec(
+ wait = ktime_to_timespec64(
ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
window = ptp->timeset[i].window;
corrected = window - wait.tv_nsec;
@@ -803,7 +803,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
ptp->timeset[last_good].minor, 0);
/* Calculate delay from NIC top of second to last_time */
- delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+ delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec;
/* Set PPS timestamp to match NIC top of second */
ptp->host_time_pps = *last_time;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index b605dfd5c7bc..9d78830da609 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
if (efx->type->test_nvram) {
rc = efx->type->test_nvram(efx);
- tests->nvram = rc ? -1 : 1;
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ tests->nvram = rc ? -1 : 1;
}
return rc;
@@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
+ if (rc == -EPERM)
+ rc = 0;
+ else
+ netif_info(efx, drv, efx->net_dev,
+ "%s phy selftest\n", rc ? "Failed" : "Passed");
+
return rc;
}
@@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
wmb();
kfree(state);
+ if (rc == -EPERM)
+ rc = 0;
+
return rc;
}
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index b323b9167526..2219b5424d2b 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -262,6 +262,7 @@ static int siena_probe_nic(struct efx_nic *efx)
}
efx->max_channels = EFX_MAX_CHANNELS;
+ efx->max_tx_channels = EFX_MAX_CHANNELS;
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
@@ -1042,9 +1043,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
- 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 1833a0146571..67f6afaa022f 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -431,8 +431,20 @@ finish_packet:
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ /* There could be packets left on the partner queue if those
+ * SKBs had skb->xmit_more set. If we do not push those they
+ * could be left for a long time and cause a netdev watchdog.
+ */
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
efx_nic_push_buffers(tx_queue);
+ } else {
+ tx_queue->xmit_more_available = skb->xmit_more;
+ }
tx_queue->tx_packets++;
@@ -722,6 +734,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+ tx_queue->xmit_more_available = false;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
@@ -747,6 +760,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
++tx_queue->read_count;
}
+ tx_queue->xmit_more_available = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
@@ -1302,8 +1316,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ /* There could be packets left on the partner queue if those
+ * SKBs had skb->xmit_more set. If we do not push those they
+ * could be left for a long time and cause a netdev watchdog.
+ */
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
efx_nic_push_buffers(tx_queue);
+ } else {
+ tx_queue->xmit_more_available = skb->xmit_more;
+ }
tx_queue->tso_bursts++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 67d9fdeedd86..664f596971b5 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1031,36 +1031,8 @@ err_out:
static void print_packet( byte * buf, int length )
{
#if 0
- int i;
- int remainder;
- int lines;
-
- pr_dbg("Packet of length %d\n", length);
- lines = length / 16;
- remainder = length % 16;
-
- for ( i = 0; i < lines ; i ++ ) {
- int cur;
-
- printk(KERN_DEBUG);
- for ( cur = 0; cur < 8; cur ++ ) {
- byte a, b;
-
- a = *(buf ++ );
- b = *(buf ++ );
- pr_cont("%02x%02x ", a, b);
- }
- pr_cont("\n");
- }
- printk(KERN_DEBUG);
- for ( i = 0; i < remainder/2 ; i++ ) {
- byte a, b;
-
- a = *(buf ++ );
- b = *(buf ++ );
- pr_cont("%02x%02x ", a, b);
- }
- pr_cont("\n");
+ print_hex_dump_debug(DRV_NAME, DUMP_PREFIX_OFFSET, 16, 1,
+ buf, length, true);
#endif
}
#endif
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 630f0b7800e4..0e2fc1a844ab 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2018,10 +2018,18 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
lp->cfg.flags |= SMC91X_USE_DMA;
# endif
if (lp->cfg.flags & SMC91X_USE_DMA) {
- int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
- smc_pxa_dma_irq, NULL);
- if (dma >= 0)
- dev->dma = dma;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ param.prio = PXAD_PRIO_LOWEST;
+ param.drcmr = -1UL;
+
+ lp->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev,
+ "data");
}
#endif
@@ -2032,8 +2040,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
version_string, revision_register & 0x0f,
lp->base, dev->irq);
- if (dev->dma != (unsigned char)-1)
- pr_cont(" DMA %d", dev->dma);
+ if (lp->dma_chan)
+ pr_cont(" DMA %p", lp->dma_chan);
pr_cont("%s%s\n",
lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
@@ -2058,8 +2066,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
err_out:
#ifdef CONFIG_ARCH_PXA
- if (retval && dev->dma != (unsigned char)-1)
- pxa_free_dma(dev->dma);
+ if (retval && lp->dma_chan)
+ dma_release_channel(lp->dma_chan);
#endif
return retval;
}
@@ -2370,6 +2378,7 @@ static int smc_drv_probe(struct platform_device *pdev)
struct smc_local *lp = netdev_priv(ndev);
lp->device = &pdev->dev;
lp->physaddr = res->start;
+
}
#endif
@@ -2406,8 +2415,8 @@ static int smc_drv_remove(struct platform_device *pdev)
free_irq(ndev->irq, ndev);
#ifdef CONFIG_ARCH_PXA
- if (ndev->dma != (unsigned char)-1)
- pxa_free_dma(ndev->dma);
+ if (lp->dma_chan)
+ dma_release_channel(lp->dma_chan);
#endif
iounmap(lp->base);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 3a18501d1068..a3c129e1e40a 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -33,6 +33,7 @@
#ifndef _SMC91X_H_
#define _SMC91X_H_
+#include <linux/dmaengine.h>
#include <linux/smc91x.h>
/*
@@ -244,6 +245,7 @@ struct smc_local {
u_long physaddr;
struct device *device;
#endif
+ struct dma_chan *dma_chan;
void __iomem *base;
void __iomem *datacs;
@@ -265,21 +267,47 @@ struct smc_local {
* as RX which can overrun memory and lose packets.
*/
#include <linux/dma-mapping.h>
-#include <mach/dma.h>
+#include <linux/dma/pxa-dma.h>
#ifdef SMC_insl
#undef SMC_insl
#define SMC_insl(a, r, p, l) \
smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
static inline void
+smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
+{
+ dma_addr_t dmabuf;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ struct dma_tx_state state;
+
+ dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
+ tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
+ DMA_DEV_TO_MEM, 0);
+ if (tx) {
+ cookie = dmaengine_submit(tx);
+ dma_async_issue_pending(lp->dma_chan);
+ do {
+ status = dmaengine_tx_status(lp->dma_chan, cookie,
+ &state);
+ cpu_relax();
+ } while (status != DMA_COMPLETE && status != DMA_ERROR &&
+ state.residue);
+ dmaengine_terminate_all(lp->dma_chan);
+ }
+ dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+}
+
+static inline void
smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
u_char *buf, int len)
{
- u_long physaddr = lp->physaddr;
- dma_addr_t dmabuf;
+ struct dma_slave_config config;
+ int ret;
/* fallback if no DMA available */
- if (dma == (unsigned char)-1) {
+ if (!lp->dma_chan) {
readsl(ioaddr + reg, buf, len);
return;
}
@@ -291,18 +319,22 @@ smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
len--;
}
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = lp->physaddr + reg;
+ config.dst_addr = lp->physaddr + reg;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(lp->dma_chan, &config);
+ if (ret) {
+ dev_err(lp->device, "dma channel configuration failed: %d\n",
+ ret);
+ return;
+ }
+
len *= 4;
- dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
- DCSR(dma) = DCSR_NODESC;
- DTADR(dma) = dmabuf;
- DSADR(dma) = physaddr + reg;
- DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
- DCMD_WIDTH4 | (DCMD_LENGTH & len));
- DCSR(dma) = DCSR_NODESC | DCSR_RUN;
- while (!(DCSR(dma) & DCSR_STOPSTATE))
- cpu_relax();
- DCSR(dma) = 0;
- dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+ smc_pxa_dma_inpump(lp, buf, len);
}
#endif
@@ -314,11 +346,11 @@ static inline void
smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
u_char *buf, int len)
{
- u_long physaddr = lp->physaddr;
- dma_addr_t dmabuf;
+ struct dma_slave_config config;
+ int ret;
/* fallback if no DMA available */
- if (dma == (unsigned char)-1) {
+ if (!lp->dma_chan) {
readsw(ioaddr + reg, buf, len);
return;
}
@@ -330,26 +362,25 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
len--;
}
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ config.src_addr = lp->physaddr + reg;
+ config.dst_addr = lp->physaddr + reg;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(lp->dma_chan, &config);
+ if (ret) {
+ dev_err(lp->device, "dma channel configuration failed: %d\n",
+ ret);
+ return;
+ }
+
len *= 2;
- dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
- DCSR(dma) = DCSR_NODESC;
- DTADR(dma) = dmabuf;
- DSADR(dma) = physaddr + reg;
- DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
- DCMD_WIDTH2 | (DCMD_LENGTH & len));
- DCSR(dma) = DCSR_NODESC | DCSR_RUN;
- while (!(DCSR(dma) & DCSR_STOPSTATE))
- cpu_relax();
- DCSR(dma) = 0;
- dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+ smc_pxa_dma_inpump(lp, buf, len);
}
#endif
-static void
-smc_pxa_dma_irq(int dma, void *dummy)
-{
- DCSR(dma) = 0;
-}
#endif /* CONFIG_ARCH_PXA */
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 959aeeade0c9..c860c9007e49 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -59,7 +59,9 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/acpi.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include "smsc911x.h"
@@ -1050,6 +1052,7 @@ static int smsc911x_mii_probe(struct net_device *dev)
#ifdef USE_PHY_WORK_AROUND
if (smsc911x_phy_loopbacktest(dev) < 0) {
SMSC_WARN(pdata, hw, "Failed Loop Back Test");
+ phy_disconnect(phydev);
return -ENODEV;
}
SMSC_TRACE(pdata, hw, "Passed Loop Back Test");
@@ -2362,59 +2365,50 @@ static const struct smsc911x_ops shifted_smsc911x_ops = {
.tx_writefifo = smsc911x_tx_writefifo_shift,
};
-#ifdef CONFIG_OF
-static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config,
- struct device_node *np)
+static int smsc911x_probe_config(struct smsc911x_platform_config *config,
+ struct device *dev)
{
- const char *mac;
+ int phy_interface;
u32 width = 0;
+ int err;
- if (!np)
- return -ENODEV;
-
- config->phy_interface = of_get_phy_mode(np);
+ phy_interface = device_get_phy_mode(dev);
+ if (phy_interface < 0)
+ phy_interface = PHY_INTERFACE_MODE_NA;
+ config->phy_interface = phy_interface;
- mac = of_get_mac_address(np);
- if (mac)
- memcpy(config->mac, mac, ETH_ALEN);
+ device_get_mac_address(dev, config->mac, ETH_ALEN);
- of_property_read_u32(np, "reg-shift", &config->shift);
-
- of_property_read_u32(np, "reg-io-width", &width);
- if (width == 4)
+ err = device_property_read_u32(dev, "reg-io-width", &width);
+ if (err == -ENXIO)
+ return err;
+ if (!err && width == 4)
config->flags |= SMSC911X_USE_32BIT;
else
config->flags |= SMSC911X_USE_16BIT;
- if (of_get_property(np, "smsc,irq-active-high", NULL))
+ device_property_read_u32(dev, "reg-shift", &config->shift);
+
+ if (device_property_present(dev, "smsc,irq-active-high"))
config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
- if (of_get_property(np, "smsc,irq-push-pull", NULL))
+ if (device_property_present(dev, "smsc,irq-push-pull"))
config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
- if (of_get_property(np, "smsc,force-internal-phy", NULL))
+ if (device_property_present(dev, "smsc,force-internal-phy"))
config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
- if (of_get_property(np, "smsc,force-external-phy", NULL))
+ if (device_property_present(dev, "smsc,force-external-phy"))
config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
- if (of_get_property(np, "smsc,save-mac-address", NULL))
+ if (device_property_present(dev, "smsc,save-mac-address"))
config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
return 0;
}
-#else
-static inline int smsc911x_probe_config_dt(
- struct smsc911x_platform_config *config,
- struct device_node *np)
-{
- return -ENODEV;
-}
-#endif /* CONFIG_OF */
static int smsc911x_drv_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct net_device *dev;
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
@@ -2435,7 +2429,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
res_size = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq == -EPROBE_DEFER) {
+ retval = -EPROBE_DEFER;
+ goto out_0;
+ } else if (irq <= 0) {
pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;
@@ -2478,7 +2475,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
goto out_disable_resources;
}
- retval = smsc911x_probe_config_dt(&pdata->config, np);
+ retval = smsc911x_probe_config(&pdata->config, &pdev->dev);
if (retval && config) {
/* copy config parameters across to pdata */
memcpy(&pdata->config, config, sizeof(pdata->config));
@@ -2654,6 +2651,12 @@ static const struct of_device_id smsc911x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
#endif
+static const struct acpi_device_id smsc911x_acpi_match[] = {
+ { "ARMH9118", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
+
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
.remove = smsc911x_drv_remove,
@@ -2661,6 +2664,7 @@ static struct platform_driver smsc911x_driver = {
.name = SMSC_CHIPNAME,
.pm = SMSC911X_PM_OPS,
.of_match_table = of_match_ptr(smsc911x_dt_ids),
+ .acpi_match_table = ACPI_PTR(smsc911x_acpi_match),
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index e817a1a44379..b1e5f24708c9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -16,6 +16,46 @@
#include "stmmac.h"
#include "stmmac_platform.h"
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ if (pdev->dev.of_node) {
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+ } else {
+ plat_dat = dev_get_platdata(&pdev->dev);
+ if (!plat_dat) {
+ dev_err(&pdev->dev, "no platform data provided\n");
+ return -EINVAL;
+ }
+
+ /* Set default value for multicast hash bins */
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat_dat->unicast_filter_entries = 1;
+ }
+
+ /* Custom initialisation (if needed) */
+ if (plat_dat->init) {
+ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@ static const struct of_device_id dwmac_generic_match[] = {
MODULE_DEVICE_TABLE(of, dwmac_generic_match);
static struct platform_driver dwmac_generic_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = dwmac_generic_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..9d89bdbf029f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
#define NSS_COMMON_CLK_DIV_MASK 0x7f
#define NSS_COMMON_CLK_SRC_CTRL 0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
/* Mode is coded on 1 bit but is different depending on the MAC ID:
* MAC0: QSGMII=0 RGMII=1
* MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -248,23 +248,40 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return NULL;
}
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
struct ipq806x_gmac *gmac;
int val;
void *err;
+ val = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (val)
+ return val;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
gmac->pdev = pdev;
err = ipq806x_gmac_of_parse(gmac);
- if (err) {
+ if (IS_ERR(err)) {
dev_err(dev, "device tree parsing error\n");
- return err;
+ return PTR_ERR(err);
}
regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,13 +302,13 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return NULL;
+ return -EINVAL;
}
regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
/* Configure the clock src according to the mode */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
- val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
@@ -304,7 +321,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- return NULL;
+ return -EINVAL;
}
regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
@@ -327,30 +344,21 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
}
- return gmac;
-}
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
- struct ipq806x_gmac *gmac = priv;
-
- ipq806x_gmac_set_speed(gmac, speed);
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data ipq806x_gmac_data = {
- .has_gmac = 1,
- .setup = ipq806x_gmac_setup,
- .fix_mac_speed = ipq806x_gmac_fix_mac_speed,
-};
-
static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
- { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+ { .compatible = "qcom,ipq806x-gmac" },
{ }
};
MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
static struct platform_driver ipq806x_gmac_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = ipq806x_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "ipq806x-gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index cb888d3ebbdc..78e9d1861896 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -25,66 +25,53 @@
# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
-struct lpc18xx_dwmac_priv_data {
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct regmap *reg;
- int interface;
-};
+ u8 ethmode;
+ int ret;
-static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
-{
- struct lpc18xx_dwmac_priv_data *dwmac;
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
- dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
- dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
- if (dwmac->interface < 0)
- return ERR_PTR(dwmac->interface);
+ plat_dat->has_gmac = true;
- dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
- if (IS_ERR(dwmac->reg)) {
- dev_err(&pdev->dev, "Syscon lookup failed\n");
- return dwmac->reg;
+ reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(reg);
}
- return dwmac;
-}
-
-static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
-{
- struct lpc18xx_dwmac_priv_data *dwmac = priv;
- u8 ethmode;
-
- if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
return -EINVAL;
}
- regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+ regmap_update_bits(reg, LPC18XX_CREG_CREG6,
LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data lpc18xx_dwmac_data = {
- .has_gmac = 1,
- .setup = lpc18xx_dwmac_setup,
- .init = lpc18xx_dwmac_init,
-};
-
static const struct of_device_id lpc18xx_dwmac_match[] = {
- { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+ { .compatible = "nxp,lpc1850-dwmac" },
{ }
};
MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
static struct platform_driver lpc18xx_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = lpc18xx_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "lpc18xx-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 61a324a87d09..c1bac1912b37 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, dwmac->reg);
}
-static void *meson6_dwmac_setup(struct platform_device *pdev)
+static int meson6_dwmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
struct meson_dwmac *dwmac;
struct resource *res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dwmac->reg))
- return ERR_CAST(dwmac->reg);
+ return PTR_ERR(dwmac->reg);
- return dwmac;
-}
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
-static const struct stmmac_of_data meson6_dwmac_data = {
- .setup = meson6_dwmac_setup,
- .fix_mac_speed = meson6_dwmac_fix_mac_speed,
-};
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id meson6_dwmac_match[] = {
- { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+ { .compatible = "amlogic,meson6-dwmac" },
{ }
};
MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
static struct platform_driver meson6_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = meson6_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "meson6-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 00a1e1e09d4f..11baa4b19779 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,7 +46,7 @@ struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
struct regulator *regulator;
- struct rk_gmac_ops *ops;
+ const struct rk_gmac_ops *ops;
bool clk_enabled;
bool clock_input;
@@ -177,7 +177,7 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
}
}
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
.set_to_rgmii = rk3288_set_to_rgmii,
.set_to_rmii = rk3288_set_to_rmii,
.set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
}
}
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
.set_to_rgmii = rk3368_set_to_rgmii,
.set_to_rmii = rk3368_set_to_rmii,
.set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
}
static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
- struct rk_gmac_ops *ops)
+ const struct rk_gmac_ops *ops)
{
struct rk_priv_data *bsp_priv;
struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
return bsp_priv;
}
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
- return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
- return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
static int rk_gmac_init(struct platform_device *pdev, void *priv)
{
struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@ static void rk_fix_speed(void *priv, unsigned int speed)
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
-static const struct stmmac_of_data rk3288_gmac_data = {
- .has_gmac = 1,
- .fix_mac_speed = rk_fix_speed,
- .setup = rk3288_gmac_setup,
- .init = rk_gmac_init,
- .exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct rk_gmac_ops *data;
+ int ret;
-static const struct stmmac_of_data rk3368_gmac_data = {
- .has_gmac = 1,
- .fix_mac_speed = rk_fix_speed,
- .setup = rk3368_gmac_setup,
- .init = rk_gmac_init,
- .exit = rk_gmac_exit,
-};
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->has_gmac = true;
+ plat_dat->init = rk_gmac_init;
+ plat_dat->exit = rk_gmac_exit;
+ plat_dat->fix_mac_speed = rk_fix_speed;
+
+ plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
+
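+ /* The core probe no longer invokes plat->init, so run it here
+ * before handing over to stmmac_dvr_probe().
+ */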
+ ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id rk_gmac_dwmac_match[] = {
- { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
- { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = rk_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "rk_gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 8141c5b844ae..401383b252a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -175,31 +175,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
return 0;
}
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
- struct socfpga_dwmac *dwmac;
-
- dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac)
- return ERR_PTR(-ENOMEM);
-
- ret = socfpga_dwmac_parse_data(dwmac, dev);
- if (ret) {
- dev_err(dev, "Unable to parse OF data\n");
- return ERR_PTR(ret);
- }
-
- ret = socfpga_dwmac_setup(dwmac);
- if (ret) {
- dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
- return ERR_PTR(ret);
- }
-
- return dwmac;
-}
-
static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct socfpga_dwmac *dwmac = priv;
@@ -257,21 +232,58 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-static const struct stmmac_of_data socfpga_gmac_data = {
- .setup = socfpga_dwmac_probe,
- .init = socfpga_dwmac_init,
- .exit = socfpga_dwmac_exit,
- .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct socfpga_dwmac *dwmac;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ ret = socfpga_dwmac_parse_data(dwmac, dev);
+ if (ret) {
+ dev_err(dev, "Unable to parse OF data\n");
+ return ret;
+ }
+
+ ret = socfpga_dwmac_setup(dwmac);
+ if (ret) {
+ dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+ return ret;
+ }
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = socfpga_dwmac_init;
+ plat_dat->exit = socfpga_dwmac_exit;
+ plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+ ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id socfpga_dwmac_match[] = {
- { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+ { .compatible = "altr,socfpga-stmmac" },
{ }
};
MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = socfpga_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a2e8111c5d14..7f6f4a4fcc70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
#include "stmmac_platform.h"
@@ -128,6 +129,11 @@ struct sti_dwmac {
struct device *dev;
struct regmap *regmap;
u32 speed;
+ void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+ void (*fix_retime_src)(void *priv, unsigned int speed);
};
static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@ static void stid127_fix_retime_src(void *priv, u32 spd)
regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
}
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct sti_dwmac *dwmac = priv;
struct regmap *regmap = dwmac->regmap;
int iface = dwmac->interface;
struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@ static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
-
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
- struct sti_dwmac *dwmac = priv;
- u32 spd = dwmac->speed;
-
- sti_dwmac_ctrl_init(dwmac);
-
- stih4xx_fix_retime_src(priv, spd);
-
- return 0;
-}
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
- struct sti_dwmac *dwmac = priv;
- u32 spd = dwmac->speed;
-
- sti_dwmac_ctrl_init(dwmac);
-
- stid127_fix_retime_src(priv, spd);
+ dwmac->fix_retime_src(priv, dwmac->speed);
return 0;
}
@@ -334,36 +321,58 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
{
+ struct plat_stmmacenet_data *plat_dat;
+ const struct sti_dwmac_of_data *data;
+ struct stmmac_resources stmmac_res;
struct sti_dwmac *dwmac;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = sti_dwmac_parse_data(dwmac, pdev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- return ERR_PTR(ret);
+ return ret;
}
- return dwmac;
+ dwmac->fix_retime_src = data->fix_retime_src;
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
+ plat_dat->fix_mac_speed = data->fix_retime_src;
+
+ ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
-static const struct stmmac_of_data stih4xx_dwmac_data = {
- .fix_mac_speed = stih4xx_fix_retime_src,
- .setup = sti_dwmac_setup,
- .init = stix4xx_init,
- .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+ .fix_retime_src = stih4xx_fix_retime_src,
};
-static const struct stmmac_of_data stid127_dwmac_data = {
- .fix_mac_speed = stid127_fix_retime_src,
- .setup = sti_dwmac_setup,
- .init = stid127_init,
- .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+ .fix_retime_src = stid127_fix_retime_src,
};
static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@ static const struct of_device_id sti_dwmac_match[] = {
MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = sti_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "sti-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 15048ca39759..52b8ed9bd87c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -33,35 +33,6 @@ struct sunxi_priv_data {
struct regulator *regulator;
};
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
- struct sunxi_priv_data *gmac;
- struct device *dev = &pdev->dev;
-
- gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac)
- return ERR_PTR(-ENOMEM);
-
- gmac->interface = of_get_phy_mode(dev->of_node);
-
- gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
- if (IS_ERR(gmac->tx_clk)) {
- dev_err(dev, "could not get tx clock\n");
- return gmac->tx_clk;
- }
-
- /* Optional regulator for PHY */
- gmac->regulator = devm_regulator_get_optional(dev, "phy");
- if (IS_ERR(gmac->regulator)) {
- if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
- return ERR_PTR(-EPROBE_DEFER);
- dev_info(dev, "no regulator found\n");
- gmac->regulator = NULL;
- }
-
- return gmac;
-}
-
#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
#define SUN7I_GMAC_MII_RATE 25000000
@@ -132,25 +103,67 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
}
}
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
- .has_gmac = 1,
- .tx_coe = 1,
- .fix_mac_speed = sun7i_fix_speed,
- .setup = sun7i_gmac_setup,
- .init = sun7i_gmac_init,
- .exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->interface = of_get_phy_mode(dev->of_node);
+
+ gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "could not get tx clock\n");
+ return PTR_ERR(gmac->tx_clk);
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ /* platform data specifying hardware features and callbacks.
+ * hardware features were copied from Allwinner drivers. */
+ plat_dat->tx_coe = 1;
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->init = sun7i_gmac_init;
+ plat_dat->exit = sun7i_gmac_exit;
+ plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+ ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
static const struct of_device_id sun7i_dwmac_match[] = {
- { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+ { .compatible = "allwinner,sun7i-a20-gmac" },
{ }
};
MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
- .probe = stmmac_pltfr_probe,
+ .probe = sun7i_gmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 771cda2a48b2..2e51b816a7e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -721,10 +721,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
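+ /* Report capabilities from the synthesized DMA features rather
+ * than from the current hwtstamp configuration.
+ */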
- if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
+ if (priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (priv->ptp_clock)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 864b476f7fd5..64d8aa4e0cad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -424,7 +424,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
- struct timespec now;
+ struct timespec64 now;
u64 temp = 0;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
@@ -621,8 +621,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
priv->default_addend);
/* initialize system time */
- getnstimeofday(&now);
- priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
now.tv_nsec);
}
@@ -837,8 +839,11 @@ static int stmmac_init_phy(struct net_device *dev)
interface);
}
- if (IS_ERR(phydev)) {
+ if (IS_ERR_OR_NULL(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
+ if (!phydev)
+ return -ENODEV;
+
return PTR_ERR(phydev);
}
@@ -1942,7 +1947,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int txsize = priv->dma_tx_size;
- unsigned int entry;
+ int entry;
int i, csum_insertion = 0, is_jumbo = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b735fa22ac95..ebf6abc4853f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus)
if (!gpio_request(reset_gpio, "mdio-reset")) {
gpio_direction_output(reset_gpio, active_low ? 1 : 0);
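+ /* The DT delay values are in microseconds; sleep rather than
+ * busy-wait, since PHY reset pulses can last for milliseconds.
+ */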
- udelay(data->delays[0]);
+ if (data->delays[0])
+ msleep(DIV_ROUND_UP(data->delays[0], 1000));
+
gpio_set_value(reset_gpio, active_low ? 0 : 1);
- udelay(data->delays[1]);
+ if (data->delays[1])
+ msleep(DIV_ROUND_UP(data->delays[1], 1000));
+
gpio_set_value(reset_gpio, active_low ? 1 : 0);
- udelay(data->delays[2]);
+ if (data->delays[2])
+ msleep(DIV_ROUND_UP(data->delays[2], 1000));
}
}
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index bcdc8955c719..d02691ba3d7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -104,32 +104,16 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
* this function is to read the driver parameters from device-tree and
* set some private fields that will be used by the main at runtime.
*/
-static int stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
struct device_node *np = pdev->dev.of_node;
+ struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
- const struct of_device_id *device;
- struct device *dev = &pdev->dev;
-
- device = of_match_device(dev->driver->of_match_table, dev);
- if (device->data) {
- const struct stmmac_of_data *data = device->data;
- plat->has_gmac = data->has_gmac;
- plat->enh_desc = data->enh_desc;
- plat->tx_coe = data->tx_coe;
- plat->rx_coe = data->rx_coe;
- plat->bugged_jumbo = data->bugged_jumbo;
- plat->pmt = data->pmt;
- plat->riwt_off = data->riwt_off;
- plat->fix_mac_speed = data->fix_mac_speed;
- plat->bus_setup = data->bus_setup;
- plat->setup = data->setup;
- plat->free = data->free;
- plat->init = data->init;
- plat->exit = data->exit;
- }
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
*mac = of_get_mac_address(np);
plat->interface = of_get_phy_mode(np);
@@ -151,7 +135,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
/* If phy-handle is not specified, check if we have a fixed-phy */
if (!plat->phy_node && of_phy_is_fixed_link(np)) {
if ((of_phy_register_fixed_link(np) < 0))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
plat->phy_node = of_node_get(np);
}
@@ -182,6 +166,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
*/
plat->maxmtu = JUMBO_LEN;
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -222,7 +212,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
GFP_KERNEL);
if (!dma_cfg) {
of_node_put(np);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
@@ -240,44 +230,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
- return 0;
+ return plat;
}
#else
-static int stmmac_probe_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat,
- const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
}
#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
-/**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res)
{
- struct stmmac_resources stmmac_res;
- int ret = 0;
struct resource *res;
- struct device *dev = &pdev->dev;
- struct plat_stmmacenet_data *plat_dat = NULL;
- memset(&stmmac_res, 0, sizeof(stmmac_res));
+ memset(stmmac_res, 0, sizeof(*stmmac_res));
 /* Get IRQ information early so that we can ask for a deferred
 * probe, if needed, before we go too far with resource allocation.
*/
- stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
- if (stmmac_res.irq < 0) {
- if (stmmac_res.irq != -EPROBE_DEFER) {
- dev_err(dev,
+ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+ if (stmmac_res->irq < 0) {
+ if (stmmac_res->irq != -EPROBE_DEFER) {
+ dev_err(&pdev->dev,
"MAC IRQ configuration information not found\n");
}
- return stmmac_res.irq;
+ return stmmac_res->irq;
}
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -287,64 +267,23 @@ int stmmac_pltfr_probe(struct platform_device *pdev)
 * In case the wake-up interrupt is not passed from the platform,
 * the driver will continue to use the MAC irq (ndev->irq).
*/
- stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (stmmac_res.wol_irq < 0) {
- if (stmmac_res.wol_irq == -EPROBE_DEFER)
+ stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (stmmac_res->wol_irq < 0) {
+ if (stmmac_res->wol_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
- stmmac_res.wol_irq = stmmac_res.irq;
+ stmmac_res->wol_irq = stmmac_res->irq;
}
- stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (stmmac_res.lpi_irq == -EPROBE_DEFER)
+ stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (stmmac_res->lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res.addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(stmmac_res.addr))
- return PTR_ERR(stmmac_res.addr);
-
- plat_dat = dev_get_platdata(&pdev->dev);
-
- if (!plat_dat)
- plat_dat = devm_kzalloc(&pdev->dev,
- sizeof(struct plat_stmmacenet_data),
- GFP_KERNEL);
- if (!plat_dat) {
- pr_err("%s: ERROR: no memory", __func__);
- return -ENOMEM;
- }
-
- /* Set default value for multicast hash bins */
- plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat_dat->unicast_filter_entries = 1;
-
- if (pdev->dev.of_node) {
- ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
- if (ret) {
- pr_err("%s: main dt probe failed", __func__);
- return ret;
- }
- }
+ stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
- /* Custom setup (if needed) */
- if (plat_dat->setup) {
- plat_dat->bsp_priv = plat_dat->setup(pdev);
- if (IS_ERR(plat_dat->bsp_priv))
- return PTR_ERR(plat_dat->bsp_priv);
- }
-
- /* Custom initialisation (if needed)*/
- if (plat_dat->init) {
- ret = plat_dat->init(pdev, plat_dat->bsp_priv);
- if (unlikely(ret))
- return ret;
- }
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
/**
* stmmac_pltfr_remove
@@ -361,9 +300,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
- if (priv->plat->free)
- priv->plat->free(pdev, priv->plat->bsp_priv);
-
return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 71da86d7bd00..ffeb8d9e2b2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,7 +19,14 @@
#ifndef __STMMAC_PLATFORM_H__
#define __STMMAC_PLATFORM_H__
-int stmmac_pltfr_probe(struct platform_device *pdev);
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res);
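+
+/* Typical glue-driver probe sequence after this split (sketch):
+ *
+ *	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ *	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ *	... set plat_dat->bsp_priv, ->init, ->exit, ->fix_mac_speed ...
+ *	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ */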
+
int stmmac_pltfr_remove(struct platform_device *pdev);
extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6ce973187225..062bce9acde6 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4529,9 +4529,6 @@ static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
- info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
- cp->casreg_len : CAS_MAX_REGS;
- info->n_stats = CAS_NUM_STAT_KEYS;
}
static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 53fe200e0b79..cc106d892e29 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = {
#endif
};
-static struct vnet *vnet_new(const u64 *local_mac)
+static struct vnet *vnet_new(const u64 *local_mac,
+ struct vio_dev *vdev)
{
struct net_device *dev;
struct vnet *vp;
@@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac)
NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
+ SET_NETDEV_DEV(dev, &vdev->dev);
+
err = register_netdev(dev);
if (err) {
pr_err("Cannot register net device, aborting\n");
@@ -1808,7 +1811,8 @@ err_out_free_dev:
return ERR_PTR(err);
}
-static struct vnet *vnet_find_or_create(const u64 *local_mac)
+static struct vnet *vnet_find_or_create(const u64 *local_mac,
+ struct vio_dev *vdev)
{
struct vnet *iter, *vp;
@@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
}
}
if (!vp)
- vp = vnet_new(local_mac);
+ vp = vnet_new(local_mac, vdev);
mutex_unlock(&vnet_list_mutex);
return vp;
@@ -1848,7 +1852,8 @@ static void vnet_cleanup(void)
static const char *local_mac_prop = "local-mac-address";
static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
- u64 port_node)
+ u64 port_node,
+ struct vio_dev *vdev)
{
const u64 *local_mac = NULL;
u64 a;
@@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
if (!local_mac)
return ERR_PTR(-ENODEV);
- return vnet_find_or_create(local_mac);
+ return vnet_find_or_create(local_mac, vdev);
}
static struct ldc_channel_config vnet_ldc_cfg = {
@@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
hp = mdesc_grab();
- vp = vnet_find_parent(hp, vdev->mp);
+ vp = vnet_find_parent(hp, vdev->mp, vdev);
if (IS_ERR(vp)) {
pr_err("Cannot find port parent vnet\n");
err = PTR_ERR(vp);
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 000000000000..8276ee5a7d54
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+ bool "Synopsys devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Synopsys devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+ tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
+ select PHYLIB
+ select CRC32
+ select MII
+ depends on OF && HAS_DMA
+ ---help---
+ This driver supports the DWC Ethernet QoS IP from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..7a375723fc18
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644
index 000000000000..85b3326775b8
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -0,0 +1,3019 @@
+/* Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver
+ *
+ * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ * This version introduced many changes which break backwards
+ * compatibility with the non-QoS IP from Synopsys (used in the ST Micro
+ * drivers). Some fields differ between version 4.00a and 4.10a, mainly the
+ * interrupt bit fields. The driver could be made compatible with 4.00a if
+ * all relevant HW errata are handled.
+ *
+ * The GMAC is highly configurable at synthesis time. This driver has been
+ * developed for a subset of the total available feature set. Currently
+ * it supports:
+ * - TSO
+ * - Checksum offload for RX and TX.
+ * - Energy-efficient Ethernet (EEE).
+ * - GMII PHY interface.
+ * - The statistics module.
+ * - Single RX and TX queue.
+ *
+ * Copyright (C) 2015 Axis Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME "dwceqos"
+#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION "0.9"
+
+#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN 8
+#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
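+/* (16376 is the largest multiple of 8 that fits in 14 bits, presumably
+ * chosen to keep split buffer lengths bus-aligned.)
+ */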
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET 0x0000
+#define MAX_MAC_REG_OFFSET 0x0bd0
+#define START_MTL_REG_OFFSET 0x0c00
+#define MAX_MTL_REG_OFFSET 0x0d7c
+#define START_DMA_REG_OFFSET 0x1000
+#define MAX_DMA_REG_OFFSET 0x117C
+
+#define REG_SPACE_SIZE 0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE 0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004
+#define REG_DWCEQOS_DMA_IS 0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130
+#define REG_DWCEQOS_DMA_CH0_IE 0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUF 0x115c
+#define REG_DWCEQOS_DMA_CH0_STA 0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR BIT(11)
+#define DWCEQOS_DMA_MODE_DA BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+ (((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+ (((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+ (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1)
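+/* Bits 1..7 select fixed AXI burst lengths of 4..256 beats; the default
+ * enables 4-, 8- and 16-beat bursts.
+ */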
+
+#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS BIT(16)
+#define DWCEQOS_DMA_IS_MACIS BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE BIT(7)
+#define DWCEQOS_DMA_RDES3_ES BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT 0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT BIT(30)
+#define DWCEQOS_DMA_TDES3_FD BIT(29)
+#define DWCEQOS_DMA_TDES3_LD BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP BIT(17)
+#define DWCEQOS_DMA_TDES3_CA 0x00030000
+#define DWCEQOS_DMA_TDES3_TSE BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26)
+
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED 0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER 0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38
+
+#define REG_DWCEQOS_MTL_IS 0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512 0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32 BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG 0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG 0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT 0x0008
+#define REG_DWCEQOS_MAC_WD_TO 0x000c
+#define REG_DWCEQOS_HASTABLE_LO 0x0010
+#define REG_DWCEQOS_HASTABLE_HI 0x0014
+#define REG_DWCEQOS_MAC_IS 0x00b0
+#define REG_DWCEQOS_MAC_IE 0x00b4
+#define REG_DWCEQOS_MAC_STAT 0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070
+
+#define DWCEQOS_MAC_CFG_ACS BIT(20)
+#define DWCEQOS_MAC_CFG_JD BIT(17)
+#define DWCEQOS_MAC_CFG_JE BIT(16)
+#define DWCEQOS_MAC_CFG_PS BIT(15)
+#define DWCEQOS_MAC_CFG_FES BIT(14)
+#define DWCEQOS_MAC_CFG_DM BIT(13)
+#define DWCEQOS_MAC_CFG_DO BIT(10)
+#define DWCEQOS_MAC_CFG_TE BIT(1)
+#define DWCEQOS_MAC_CFG_IPC BIT(27)
+#define DWCEQOS_MAC_CFG_RE BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8))
+#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR BIT(0)
+
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x) & 15) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+ DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+ DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) (128 << (((x) & 0x7c0) >> 6))
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+ (1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR BIT(0)
+
+#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL 0x0700
+#define REG_DWCEQOS_MMC_RXIRQ 0x0704
+#define REG_DWCEQOS_MMC_TXIRQ 0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR 0x07F0
+#define DWC_MMC_TXLPIUSCNTR 0x07EC
+#define DWC_MMC_TXOVERSIZE_G 0x0778
+#define DWC_MMC_TXVLANPACKETS_G 0x0774
+#define DWC_MMC_TXPAUSEPACKETS 0x0770
+#define DWC_MMC_TXEXCESSDEF 0x076C
+#define DWC_MMC_TXPACKETCOUNT_G 0x0768
+#define DWC_MMC_TXOCTETCOUNT_G 0x0764
+#define DWC_MMC_TXCARRIERERROR 0x0760
+#define DWC_MMC_TXEXCESSCOL 0x075C
+#define DWC_MMC_TXLATECOL 0x0758
+#define DWC_MMC_TXDEFERRED 0x0754
+#define DWC_MMC_TXMULTICOL_G 0x0750
+#define DWC_MMC_TXSINGLECOL_G 0x074C
+#define DWC_MMC_TXUNDERFLOWERROR 0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB 0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB 0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB 0x0728
+#define DWC_MMC_TX64OCTETS_GB 0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB 0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB 0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR 0x07F8
+#define DWC_MMC_RXLPIUSCNTR 0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G 0x07E4
+#define DWC_MMC_RXRCVERROR 0x07E0
+#define DWC_MMC_RXWATCHDOG 0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB 0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW 0x07D4
+#define DWC_MMC_RXPAUSEPACKETS 0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
+#define DWC_MMC_RXLENGTHERROR 0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
+#define DWC_MMC_RX64OCTETS_GB 0x07AC
+#define DWC_MMC_RXOVERSIZE_G 0x07A8
+#define DWC_MMC_RXUNDERSIZE_G 0x07A4
+#define DWC_MMC_RXJABBERERROR 0x07A0
+#define DWC_MMC_RXRUNTERROR 0x079C
+#define DWC_MMC_RXALIGNMENTERROR 0x0798
+#define DWC_MMC_RXCRCERROR 0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
+#define DWC_MMC_RXOCTETCOUNT_G 0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
+struct ring_desc {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+ u32 des0;
+ u32 des1;
+ u32 des2;
+ u32 des3;
+} ____cacheline_aligned;
+
+struct dwceqos_mmc_counters {
+ __u64 txlpitranscntr;
+ __u64 txpiuscntr;
+ __u64 txoversize_g;
+ __u64 txvlanpackets_g;
+ __u64 txpausepackets;
+ __u64 txexcessdef;
+ __u64 txpacketcount_g;
+ __u64 txoctetcount_g;
+ __u64 txcarriererror;
+ __u64 txexcesscol;
+ __u64 txlatecol;
+ __u64 txdeferred;
+ __u64 txmulticol_g;
+ __u64 txsinglecol_g;
+ __u64 txunderflowerror;
+ __u64 txbroadcastpackets_gb;
+ __u64 txmulticastpackets_gb;
+ __u64 txunicastpackets_gb;
+ __u64 tx1024tomaxoctets_gb;
+ __u64 tx512to1023octets_gb;
+ __u64 tx256to511octets_gb;
+ __u64 tx128to255octets_gb;
+ __u64 tx65to127octets_gb;
+ __u64 tx64octets_gb;
+ __u64 txmulticastpackets_g;
+ __u64 txbroadcastpackets_g;
+ __u64 txpacketcount_gb;
+ __u64 txoctetcount_gb;
+
+ __u64 rxlpitranscntr;
+ __u64 rxlpiuscntr;
+ __u64 rxctrlpackets_g;
+ __u64 rxrcverror;
+ __u64 rxwatchdog;
+ __u64 rxvlanpackets_gb;
+ __u64 rxfifooverflow;
+ __u64 rxpausepackets;
+ __u64 rxoutofrangetype;
+ __u64 rxlengtherror;
+ __u64 rxunicastpackets_g;
+ __u64 rx1024tomaxoctets_gb;
+ __u64 rx512to1023octets_gb;
+ __u64 rx256to511octets_gb;
+ __u64 rx128to255octets_gb;
+ __u64 rx65to127octets_gb;
+ __u64 rx64octets_gb;
+ __u64 rxoversize_g;
+ __u64 rxundersize_g;
+ __u64 rxjabbererror;
+ __u64 rxrunterror;
+ __u64 rxalignmenterror;
+ __u64 rxcrcerror;
+ __u64 rxmulticastpackets_g;
+ __u64 rxbroadcastpackets_g;
+ __u64 rxoctetcount_g;
+ __u64 rxoctetcount_gb;
+ __u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+ const char stat_name[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define STAT_ITEM(name, var) \
+ {\
+ name,\
+ offsetof(struct dwceqos_mmc_counters, var),\
+ }
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+ STAT_ITEM("tx_bytes", txoctetcount_gb),
+ STAT_ITEM("tx_packets", txpacketcount_gb),
+ STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+ STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+ STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb),
+ STAT_ITEM("tx_pause_packets", txpausepackets),
+ STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+ STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+ STAT_ITEM("tx_underflow_errors", txunderflowerror),
+ STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+ STAT_ITEM("rx_bytes", rxoctetcount_gb),
+ STAT_ITEM("rx_packets", rxpacketcount_gb),
+ STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+ STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+ STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+ STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+ STAT_ITEM("rx_pause_packets", rxpausepackets),
+ STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+ STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+ STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+ STAT_ITEM("rx_oversize_packets", rxoversize_g),
+ STAT_ITEM("rx_undersize_packets", rxundersize_g),
+ STAT_ITEM("rx_jabbers", rxjabbererror),
+ STAT_ITEM("rx_align_errors", rxalignmenterror),
+ STAT_ITEM("rx_crc_errors", rxcrcerror),
+ STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
+
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+ /* Enable AXI low-power interface. */
+ bool en_lpi;
+ /* Limit on number of outstanding AXI write requests. */
+ u32 write_requests;
+ /* Limit on number of outstanding AXI read requests. */
+ u32 read_requests;
+ /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+ u32 burst_map;
+ /* DMA programmable burst length. */
+ u32 tx_pbl;
+ u32 rx_pbl;
+};
+
+struct dwceqos_flowcontrol {
+ int autoneg;
+ int rx;
+ int rx_current;
+ int tx;
+ int tx_current;
+};
+
+struct net_local {
+ void __iomem *baseaddr;
+ struct clk *phy_ref_clk;
+ struct clk *apb_pclk;
+
+ struct device_node *phy_node;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+
+ u32 msg_enable;
+
+ struct tasklet_struct tx_bdreclaim_tasklet;
+ struct workqueue_struct *txtimeout_handler_wq;
+ struct work_struct txtimeout_reinit;
+
+ phy_interface_t phy_interface;
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct napi_struct napi;
+
+ /* DMA Descriptor Areas */
+ struct ring_desc *rx_skb;
+ struct ring_desc *tx_skb;
+
+ struct dwceqos_dma_desc *tx_descs;
+ struct dwceqos_dma_desc *rx_descs;
+
+ /* DMA-mapped descriptor areas. */
+ dma_addr_t tx_descs_addr;
+ dma_addr_t rx_descs_addr;
+ dma_addr_t tx_descs_tail_addr;
+ dma_addr_t rx_descs_tail_addr;
+
+ size_t tx_free;
+ size_t tx_next;
+ size_t rx_cur;
+ size_t tx_cur;
+
+ /* Spinlock protecting the TX DMA descriptor ring. */
+ spinlock_t tx_lock;
+
+ /* Spinlock for register read-modify-writes. */
+ spinlock_t hw_lock;
+
+ u32 feature0;
+ u32 feature1;
+ u32 feature2;
+
+ struct dwceqos_bus_cfg bus_cfg;
+ bool en_tx_lpi_clockgating;
+
+ int eee_enabled;
+ int eee_active;
+ int csr_val;
+ u32 gso_size;
+
+ struct dwceqos_mmc_counters mmc_counters;
+ /* Protect the mmc_counter updates. */
+ spinlock_t stats_lock;
+ u32 mmc_rx_counters_mask;
+ u32 mmc_tx_counters_mask;
+
+ struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+ u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+ unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
+#define dwceqos_read(lp, reg) \
+ readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val) \
+ writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
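+
+/* Note that the relaxed accessors provide no memory barriers; ordering
+ * against DMA buffers must be handled by the callers where it matters.
+ */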
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.rx_current = 0;
+ lp->flowcontrol.tx_current = 0;
+ lp->eee_active = 0;
+ lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+ struct dwceqos_dma_desc *dd;
+
+ if (tx)
+ dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
+ else
+ dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
+
+ pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+ index, dd);
+ pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+ dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+ size_t desci, i;
+
+ pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+ lp->tx_cur, lp->tx_next);
+
+ print_descriptor(lp, lp->rx_cur, 0);
+
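+ /* Dump the full TX ring starting ten entries behind tx_cur; the
+ * unsigned wrap in (tx_cur - 10) is harmless since the ring size
+ * is a power of two.
+ */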
+ for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
+ i < DWCEQOS_TX_DCNT;
+ ++i) {
+ print_descriptor(lp, desci, 1);
+ desci = (desci + 1) % DWCEQOS_TX_DCNT;
+ }
+
+ pr_info("DMA_Debug_Status0: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+ pr_info("DMA_CH0_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+ pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+ dwceqos_read(lp, 0x1144));
+ pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+ dwceqos_read(lp, 0x1154));
+ pr_info("MTL_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+ pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+ pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+ pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+ dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+ dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
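+/* Derive the MDIO clock-range (CR) divider from the APB clock so that
+ * the resulting MDC frequency stays within the IEEE 802.3 limit of
+ * about 2.5 MHz.
+ */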
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+ int rate = clk_get_rate(lp->apb_pclk);
+
+ if (rate <= 20000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+ else if (rate <= 35000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+ else if (rate <= 60000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+ else if (rate <= 100000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+ else if (rate <= 150000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+ else if (rate <= 250000000)
+ lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int i;
+ int data;
+
+ regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+ DWCEQOS_MDIO_PHYREG(phyreg) |
+ DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+ DWCEQOS_MAC_MDIO_ADDR_GB |
+ DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
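+ /* Poll the GB (busy) bit; a timeout yields 0xffff, which reads
+ * like an absent PHY.
+ */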
+ for (i = 0; i < 5; ++i) {
+ usleep_range(64, 128);
+ if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+ DWCEQOS_MAC_MDIO_ADDR_GB))
+ break;
+ }
+
+ data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+ if (i == 5) {
+ netdev_warn(lp->ndev, "MDIO read timed out\n");
+ data = 0xffff;
+ }
+
+ return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+ u16 value)
+{
+ struct net_local *lp = bus->priv;
+ u32 regval;
+ int i;
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+ regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+ DWCEQOS_MDIO_PHYREG(phyreg) |
+ DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+ DWCEQOS_MAC_MDIO_ADDR_GB |
+ DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+ for (i = 0; i < 5; ++i) {
+ usleep_range(64, 128);
+ if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+ DWCEQOS_MAC_MDIO_ADDR_GB))
+ break;
+ }
+ if (i == 5)
+ netdev_warn(lp->ndev, "MDIO write timed out\n");
+ return 0;
+}
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(phydev, rq, cmd);
+ default:
+ dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+ return -EOPNOTSUPP;
+ }
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ /* Indicate link down to the LPI state machine */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ /* Indicate link up to the LPI state machine */
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+ lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+ /* Check for changed EEE capability */
+ if (!lp->eee_active && lp->eee_enabled) {
+ lp->eee_enabled = 0;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ }
+}
+
+static void dwceqos_set_speed(struct net_local *lp)
+{
+ struct phy_device *phydev = lp->phy_dev;
+ u32 regval;
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+ DWCEQOS_MAC_CFG_DM);
+
+ if (phydev->duplex)
+ regval |= DWCEQOS_MAC_CFG_DM;
+ if (phydev->speed == SPEED_10) {
+ regval |= DWCEQOS_MAC_CFG_PS;
+ } else if (phydev->speed == SPEED_100) {
+ regval |= DWCEQOS_MAC_CFG_PS |
+ DWCEQOS_MAC_CFG_FES;
+ } else if (phydev->speed != SPEED_1000) {
+ netdev_err(lp->ndev,
+ "unknown PHY speed %d\n",
+ phydev->speed);
+ return;
+ }
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+ int status_change = 0;
+
+ if (phydev->link) {
+ if ((lp->speed != phydev->speed) ||
+ (lp->duplex != phydev->duplex)) {
+ dwceqos_set_speed(lp);
+
+ lp->speed = phydev->speed;
+ lp->duplex = phydev->duplex;
+ status_change = 1;
+ }
+
+ if (lp->flowcontrol.autoneg) {
+ lp->flowcontrol.rx = phydev->pause ||
+ phydev->asym_pause;
+ lp->flowcontrol.tx = phydev->pause ||
+ phydev->asym_pause;
+ }
+
+ if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+ if (netif_msg_link(lp))
+ netdev_dbg(ndev, "set rx flow to %d\n",
+ lp->flowcontrol.rx);
+ dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+ lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+ }
+ if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+ if (netif_msg_link(lp))
+ netdev_dbg(ndev, "set tx flow to %d\n",
+ lp->flowcontrol.tx);
+ dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+ lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+ }
+ }
+
+ if (phydev->link != lp->link) {
+ lp->link = phydev->link;
+ status_change = 1;
+ }
+
+ if (status_change) {
+ if (phydev->link) {
+ lp->ndev->trans_start = jiffies;
+ dwceqos_link_up(lp);
+ } else {
+ dwceqos_link_down(lp);
+ }
+ phy_print_status(phydev);
+ }
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
+
+ if (lp->phy_node) {
+ phydev = of_phy_connect(lp->ndev,
+ lp->phy_node,
+ &dwceqos_adjust_link,
+ 0,
+ lp->phy_interface);
+
+ if (!phydev) {
+ netdev_err(ndev, "no PHY found\n");
+ return -ENODEV;
+ }
+ } else {
+ netdev_err(ndev, "no PHY configured\n");
+ return -ENODEV;
+ }
+
+ if (netif_msg_probe(lp))
+ netdev_dbg(lp->ndev,
+ "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n",
+ phydev, phydev->phy_id, phydev->addr);
+
+ phydev->supported &= PHY_GBIT_FEATURES;
+
+ lp->link = 0;
+ lp->speed = 0;
+ lp->duplex = DUPLEX_UNKNOWN;
+ lp->phy_dev = phydev;
+
+ if (netif_msg_probe(lp)) {
+ netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+ lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+ netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+ lp->phy_dev->drv->name);
+ }
+
+ return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+ struct sk_buff *new_skb;
+ dma_addr_t new_skb_baddr = 0;
+
+ new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+ if (!new_skb) {
+ netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+ goto err_out;
+ }
+
+ new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+ new_skb->data, DWCEQOS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+ netdev_err(lp->ndev, "DMA map error\n");
+ dev_kfree_skb(new_skb);
+ new_skb = NULL;
+ goto err_out;
+ }
+
+ lp->rx_descs[index].des0 = new_skb_baddr;
+ lp->rx_descs[index].des1 = 0;
+ lp->rx_descs[index].des2 = 0;
+ lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+ DWCEQOS_DMA_RDES3_BUF1V |
+ DWCEQOS_DMA_RDES3_OWN;
+
+ lp->rx_skb[index].mapping = new_skb_baddr;
+ lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+ lp->rx_skb[index].skb = new_skb;
+}
+
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+ int i;
+
+ if (lp->rx_skb) {
+ for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+ if (lp->rx_skb[i].skb) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[i].mapping,
+ lp->rx_skb[i].len,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb(lp->rx_skb[i].skb);
+ lp->rx_skb[i].skb = NULL;
+ lp->rx_skb[i].mapping = 0;
+ }
+ }
+ }
+
+ if (lp->tx_skb) {
+ for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+ if (lp->tx_skb[i].skb) {
+ dev_kfree_skb(lp->tx_skb[i].skb);
+ lp->tx_skb[i].skb = NULL;
+ }
+ if (lp->tx_skb[i].mapping) {
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+ lp->tx_skb[i].mapping = 0;
+ }
+ }
+ }
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+ int size;
+
+ dwceqos_clean_rings(lp);
+
+ kfree(lp->tx_skb);
+ lp->tx_skb = NULL;
+ kfree(lp->rx_skb);
+ lp->rx_skb = NULL;
+
+ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+ if (lp->rx_descs) {
+ dma_free_coherent(lp->ndev->dev.parent, size,
+ (void *)(lp->rx_descs), lp->rx_descs_addr);
+ lp->rx_descs = NULL;
+ }
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+ if (lp->tx_descs) {
+ dma_free_coherent(lp->ndev->dev.parent, size,
+ (void *)(lp->tx_descs), lp->tx_descs_addr);
+ lp->tx_descs = NULL;
+ }
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+ int size;
+ u32 i;
+
+ lp->gso_size = 0;
+
+ lp->tx_skb = NULL;
+ lp->rx_skb = NULL;
+ lp->rx_descs = NULL;
+ lp->tx_descs = NULL;
+
+ /* Reset the DMA indexes */
+ lp->rx_cur = 0;
+ lp->tx_cur = 0;
+ lp->tx_next = 0;
+ lp->tx_free = DWCEQOS_TX_DCNT;
+
+ /* Allocate Ring descriptors */
+ size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+ lp->rx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->rx_skb)
+ goto err_out;
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+ lp->tx_skb = kzalloc(size, GFP_KERNEL);
+ if (!lp->tx_skb)
+ goto err_out;
+
+ /* Allocate DMA descriptors */
+ size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+ lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+ &lp->rx_descs_addr, GFP_KERNEL);
+ if (!lp->rx_descs)
+ goto err_out;
+ lp->rx_descs_tail_addr = lp->rx_descs_addr +
+ sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+ size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+ lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+ &lp->tx_descs_addr, GFP_KERNEL);
+ if (!lp->tx_descs)
+ goto err_out;
+ lp->tx_descs_tail_addr = lp->tx_descs_addr +
+ sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
+
+ /* Initialize RX Ring Descriptors and buffers */
+ for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+ dwceqos_alloc_rxring_desc(lp, i);
+ if (!(lp->rx_skb[i].skb))
+ goto err_out;
+ }
+
+ /* Initialize TX Descriptors */
+ for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+ lp->tx_descs[i].des0 = 0;
+ lp->tx_descs[i].des1 = 0;
+ lp->tx_descs[i].des2 = 0;
+ lp->tx_descs[i].des3 = 0;
+ }
+
+ /* Make descriptor writes visible to the DMA. */
+ wmb();
+
+ return 0;
+
+err_out:
+ dwceqos_descriptor_free(lp);
+ return -ENOMEM;
+}
+
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+ return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+ lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+ lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+ lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval |= DWCEQOS_DMA_CH0_IE_TIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval |= DWCEQOS_DMA_CH0_IE_RIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+ regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+ dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+ dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+ int ret = -ENXIO, i;
+ struct resource res;
+ struct device_node *mdionode;
+
+ mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+ if (!mdionode)
+ return 0;
+
+ lp->mii_bus = mdiobus_alloc();
+ if (!lp->mii_bus) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ lp->mii_bus->name = "DWCEQOS MII bus";
+ lp->mii_bus->read = &dwceqos_mdio_read;
+ lp->mii_bus->write = &dwceqos_mdio_write;
+ lp->mii_bus->priv = lp;
+ lp->mii_bus->parent = &lp->ndev->dev;
+
+ lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!lp->mii_bus->irq) {
+ ret = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ lp->mii_bus->irq[i] = PHY_POLL;
+ of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ if (of_mdiobus_register(lp->mii_bus, mdionode))
+ goto err_out_free_mdio_irq;
+
+ of_node_put(mdionode);
+ return 0;
+
+err_out_free_mdio_irq:
+ kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(lp->mii_bus);
+err_out:
+ of_node_put(mdionode);
+ return ret;
+}
+
+/* DMA reset. When issued, this also resets all MTL and MAC registers. */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+ /* Wait (at most) 0.5 seconds for DMA reset */
+ int i = 5000;
+ u32 reg;
+
+ /* Force gigabit to guarantee a TX clock for GMII. */
+ reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+ reg |= DWCEQOS_MAC_CFG_DM;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+ do {
+ udelay(100);
+ i--;
+ reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+ } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+ /* We might experience a timeout if the chip clock mux is broken */
+ if (!i)
+ netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+ if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+ netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+ dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+ "read" : "write",
+ dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+ "descr" : "data",
+ dma_status);
+
+ print_status(lp);
+ }
+ if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+ netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+ dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+ "read" : "write",
+ dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+ "descr" : "data",
+ dma_status);
+
+ print_status(lp);
+ }
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+
+ /* A latched MMC interrupt cannot be masked; we must read all the
+ * counters that have an interrupt pending.
+ */
+ dwceqos_read_mmc_counters(lp,
+ dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+ dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+ u32 cause;
+
+ cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+ if (cause & DWCEQOS_MAC_IS_MMC_INT)
+ dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct net_local *lp = netdev_priv(ndev);
+
+ u32 cause;
+ u32 dma_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+ /* DMA Channel 0 Interrupt */
+ if (cause & DWCEQOS_DMA_IS_DC0IS) {
+ dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+ /* Transmit Interrupt */
+ if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+ tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+ dwceqos_dma_disable_txirq(lp);
+ }
+
+ /* Receive Interrupt */
+ if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+ /* Disable RX IRQs */
+ dwceqos_dma_disable_rxirq(lp);
+ napi_schedule(&lp->napi);
+ }
+
+ /* Fatal Bus Error interrupt */
+ if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+ dwceqos_fatal_bus_error(lp, dma_status);
+
+ /* errata 9000831707 */
+ dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+ DWCEQOS_DMA_CH0_IS_REB;
+ }
+
+ /* Ack all DMA Channel 0 IRQs */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+ ret = IRQ_HANDLED;
+ }
+
+ if (cause & DWCEQOS_DMA_IS_MTLIS) {
+ u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+ dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+ ret = IRQ_HANDLED;
+ }
+
+ if (cause & DWCEQOS_DMA_IS_MACIS) {
+ dwceqos_mac_interrupt(lp);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+ if (enable)
+ regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+ else
+ regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ /* MTL flow control */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+ if (enable)
+ regval |= DWCEQOS_MTL_RXQ_EHFC;
+ else
+ regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+ /* MAC flow control */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+ if (enable)
+ regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+ else
+ regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+ u32 regval;
+ unsigned long flags;
+ int RQS, RFD, RFA;
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+ /* The queue size is in units of 256 bytes. We want 512-byte units for
+ * the threshold fields.
+ */
+ RQS = ((regval >> 20) & 0x3FF) + 1;
+ RQS /= 2;
+
+ /* The thresholds are relative to a full queue, with a bias
+ * of 1 KiByte below full.
+ */
+ RFD = RQS / 2 - 2;
+ RFA = RQS / 8 - 2;
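+
+ /* Worked example (illustrative values only): a 4 KiB RX queue is
+ * encoded as an RQS field of 15, so RQS = (15 + 1) / 2 = 8 units of
+ * 512 bytes. Then RFD = 8 / 2 - 2 = 2 and RFA = 8 / 8 - 2 = -1; the
+ * negative RFA fails the check below and flow control stays off.
+ */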
+
+ regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+ if (RFD >= 0 && RFA >= 0) {
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+ } else {
+ netdev_warn(lp->ndev,
+ "FIFO too small for flow control.");
+ }
+
+ regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+ DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+ unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
+ BUG_ON(!rate_mhz);
+
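+ /* Illustration (assumed rate): a 125 MHz apb_pclk gives
+ * rate_mhz = 125, so 124 is programmed and the MAC derives its
+ * 1 us tick from it.
+ */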
+ dwceqos_write(lp,
+ REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+ DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+ u32 sysbus_reg;
+
+ /* N.B. We do not support the Fixed Burst mode because it
+ * opens a race window by making HW access to DMA descriptors
+ * non-atomic.
+ */
+
+ sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+ if (lp->bus_cfg.en_lpi)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+ if (lp->bus_cfg.burst_map)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+ lp->bus_cfg.burst_map);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+ DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
+ if (lp->bus_cfg.read_requests)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+ lp->bus_cfg.read_requests - 1);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+ DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+ if (lp->bus_cfg.write_requests)
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+ lp->bus_cfg.write_requests - 1);
+ else
+ sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+ DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+ if (netif_msg_hw(lp))
+ netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
+static void dwceqos_init_hw(struct net_local *lp)
+{
+ u32 regval;
+ u32 buswidth;
+ u32 dma_skip;
+
+ /* Software reset */
+ dwceqos_reset_hw(lp);
+
+ dwceqos_configure_bus(lp);
+
+ /* Probe data bus width, 32/64/128 bits. */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+ buswidth = (regval ^ 0xF) + 1;
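+
+ /* Illustration: on a 64-bit bus the three low address bits read
+ * back as zero, so 0xF returns 0x8 and
+ * buswidth = (0x8 ^ 0xF) + 1 = 8 bytes.
+ */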
+
+ /* Cache-align dma descriptors. */
+ dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+ DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+ DWCEQOS_DMA_CH_CTRL_PBLX8);
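+
+ /* Illustration (assuming the descriptor struct is padded to a
+ * 64-byte cache line on an 8-byte bus): dma_skip = (64 - 16) / 8 = 6
+ * bus words are skipped between the 16 used bytes of consecutive
+ * descriptors.
+ */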
+
+ /* Initialize DMA Channel 0 */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+ (u32)lp->tx_descs_addr);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+ (u32)lp->rx_descs_addr);
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+ lp->tx_descs_tail_addr);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+ lp->rx_descs_tail_addr);
+
+ if (lp->bus_cfg.tx_pbl)
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+ else
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+ /* Enable TSO if the HW supports it */
+ if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+ regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+ if (lp->bus_cfg.rx_pbl)
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+ else
+ regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+ regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+ regval |= DWCEQOS_DMA_CH_CTRL_START;
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+ /* Initialize MTL Queues */
+ regval = DWCEQOS_MTL_SCHALG_STRICT;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+ regval = DWCEQOS_MTL_TXQ_SIZE(
+ DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+ DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+ DWCEQOS_MTL_TXQ_TTC512;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+ regval = DWCEQOS_MTL_RXQ_SIZE(
+ DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+ DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+ dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+ dwceqos_configure_flow_control(lp);
+
+ /* Initialize MAC */
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+ lp->eee_enabled = 0;
+
+ dwceqos_configure_clock(lp);
+
+ /* MMC counters */
+
+ /* probe implemented counters */
+ dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+ dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+ lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+ lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+ dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+ DWCEQOS_MMC_CTRL_RSTONRD);
+ dwceqos_enable_mmc_interrupt(lp);
+
+ /* Enable Interrupts */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+ DWCEQOS_DMA_CH0_IE_NIE |
+ DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+ DWCEQOS_DMA_CH0_IE_AIE |
+ DWCEQOS_DMA_CH0_IE_FBEE);
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+ DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+ /* Start TX DMA */
+ regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+ regval | DWCEQOS_DMA_CH_CTRL_START);
+
+ /* Enable MAC TX/RX */
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+ regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+ struct net_device *ndev = (struct net_device *)data;
+ struct net_local *lp = netdev_priv(ndev);
+ unsigned int tx_bytes = 0;
+ unsigned int tx_packets = 0;
+
+ spin_lock(&lp->tx_lock);
+
+ while (lp->tx_free < DWCEQOS_TX_DCNT) {
+ struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+ struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+ /* Descriptor still being held by DMA ? */
+ if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+ break;
+
+ if (rd->mapping)
+ dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(rd->skb)) {
+ ++tx_packets;
+ tx_bytes += rd->skb->len;
+ dev_consume_skb_any(rd->skb);
+ }
+
+ rd->skb = NULL;
+ rd->mapping = 0;
+ lp->tx_free++;
+ lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
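+ /* DWCEQOS_DMA_RDES3_ES is reused here; the error summary bit
+ * appears to sit at the same position in TDES3 and RDES3.
+ */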
+ if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+ (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+ if (netif_msg_tx_err(lp))
+ netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+ dd->des3);
+ if (netif_msg_hw(lp))
+ print_status(lp);
+ }
+ }
+ spin_unlock(&lp->tx_lock);
+
+ netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+ dwceqos_dma_enable_txirq(lp);
+ netif_wake_queue(ndev);
+}
+
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+ struct sk_buff *skb;
+ u32 tot_size = 0;
+ unsigned int n_packets = 0;
+ unsigned int n_descs = 0;
+ u32 len;
+
+ struct dwceqos_dma_desc *dd;
+ struct sk_buff *new_skb;
+ dma_addr_t new_skb_baddr = 0;
+
+ while (n_descs < budget) {
+ if (!dwceqos_packet_avail(lp))
+ break;
+
+ new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+ if (!new_skb) {
+ netdev_err(lp->ndev, "no memory for new sk_buff\n");
+ break;
+ }
+
+ /* Get dma handle of skb->data */
+ new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent,
+ new_skb->data,
+ DWCEQOS_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+ netdev_err(lp->ndev, "DMA map error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+
+ /* Read descriptor data after reading owner bit. */
+ dma_rmb();
+
+ dd = &lp->rx_descs[lp->rx_cur];
+ len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+ skb = lp->rx_skb[lp->rx_cur].skb;
+
+ /* Unmap old buffer */
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->rx_skb[lp->rx_cur].mapping,
+ lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+ /* Discard packet on reception error or bad checksum */
+ if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+ (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ } else {
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+ case DWCEQOS_DMA_RDES1_PT_UDP:
+ case DWCEQOS_DMA_RDES1_PT_TCP:
+ case DWCEQOS_DMA_RDES1_PT_ICMP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+ }
+
+ if (unlikely(!skb)) {
+ if (netif_msg_rx_err(lp))
+ netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+ lp->rx_descs[lp->rx_cur].des3);
+ } else {
+ tot_size += skb->len;
+ n_packets++;
+
+ netif_receive_skb(skb);
+ }
+
+ lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+ lp->rx_descs[lp->rx_cur].des1 = 0;
+ lp->rx_descs[lp->rx_cur].des2 = 0;
+ /* The DMA must observe des0/1/2 written before des3. */
+ wmb();
+ lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+ DWCEQOS_DMA_RDES3_OWN |
+ DWCEQOS_DMA_RDES3_BUF1V;
+
+ lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+ lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+ lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+ n_descs++;
+ lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+ }
+
+ /* Make sure any ownership update is written to the descriptors before
+ * DMA wakeup.
+ */
+ wmb();
+
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+ /* Wake up RX by writing tail pointer */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+ lp->rx_descs_tail_addr);
+
+ return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct net_local *lp = container_of(napi, struct net_local, napi);
+ int work_done = 0;
+
+ work_done = dwceqos_rx(lp, budget - work_done);
+
+ if (!dwceqos_packet_avail(lp) && work_done < budget) {
+ napi_complete(napi);
+ dwceqos_dma_enable_rxirq(lp);
+ } else {
+ work_done = budget;
+ }
+
+ return work_done;
+}
+
+/* Reinitialize the hardware after a TX timeout */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+ struct net_local *lp = container_of(data, struct net_local,
+ txtimeout_reinit);
+
+ netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+ DWCEQOS_TX_TIMEOUT);
+
+ if (netif_msg_hw(lp))
+ print_status(lp);
+
+ rtnl_lock();
+ dwceqos_stop(lp->ndev);
+ dwceqos_open(lp->ndev);
+ rtnl_unlock();
+}
+
+/* DT Probing function called by main probe */
+static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct net_local *lp;
+ const void *mac_address;
+ struct dwceqos_bus_cfg *bus_cfg;
+ struct device_node *np = pdev->dev.of_node;
+
+ ndev = platform_get_drvdata(pdev);
+ lp = netdev_priv(ndev);
+ bus_cfg = &lp->bus_cfg;
+
+ /* Set the MAC address. */
+ mac_address = of_get_mac_address(pdev->dev.of_node);
+ if (mac_address)
+ ether_addr_copy(ndev->dev_addr, mac_address);
+
+ /* These are all optional parameters */
+ lp->en_tx_lpi_clockgating = of_property_read_bool(np,
+ "snps,en-tx-lpi-clockgating");
+ bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+ of_property_read_u32(np, "snps,write-requests",
+ &bus_cfg->write_requests);
+ of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+ of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+ of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+ of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+ netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
+ bus_cfg->en_lpi,
+ bus_cfg->write_requests,
+ bus_cfg->read_requests,
+ bus_cfg->burst_map,
+ bus_cfg->rx_pbl,
+ bus_cfg->tx_pbl);
+
+ return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int res;
+
+ dwceqos_reset_state(lp);
+ res = dwceqos_descriptor_init(lp);
+ if (res) {
+ netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+ return res;
+ }
+ netdev_reset_queue(ndev);
+
+ napi_enable(&lp->napi);
+ phy_start(lp->phy_dev);
+ dwceqos_init_hw(lp);
+
+ netif_start_queue(ndev);
+ tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+ return 0;
+}
+
+static bool dwceqos_is_tx_dma_suspended(struct net_local *lp)
+{
+ u32 reg;
+
+ reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+ reg = DMA_GET_TX_STATE_CH0(reg);
+
+ return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+ /* Wait for all pending TX buffers to be sent. Upper limit based
+ * on max frame size on a 10 Mbit link.
+ */
+ size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
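+
+ /* Each iteration below sleeps at least 100 us, so the limit allows
+ * roughly 1.25 ms per descriptor - about the wire time of a
+ * full-size frame at 10 Mbit/s.
+ */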
+
+ while (!dwceqos_is_tx_dma_suspended(lp) && limit--)
+ usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ phy_stop(lp->phy_dev);
+
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi);
+
+ dwceqos_drain_dma(lp);
+
+ netif_tx_lock(lp->ndev);
+ dwceqos_reset_hw(lp);
+ dwceqos_descriptor_free(lp);
+ netif_tx_unlock(lp->ndev);
+
+ return 0;
+}
+
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+ unsigned short gso_size)
+{
+ struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+ dd->des0 = 0;
+ dd->des1 = 0;
+ dd->des2 = gso_size;
+ dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+ lp->tx_descs_tail_addr);
+}
+
+struct dwceqos_tx {
+ size_t nr_descriptors;
+ size_t initial_descriptor;
+ size_t last_descriptor;
+ size_t prev_gso_size;
+ size_t network_header_len;
+};
+
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ size_t n = 1;
+ size_t i;
+
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+ ++n;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+ BYTES_PER_DMA_DESC;
+ }
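+
+ /* At this point n counts one descriptor for the linear part, one
+ * extra TSO context descriptor when the GSO size changed, and one
+ * descriptor per BYTES_PER_DMA_DESC-sized chunk of each fragment.
+ */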
+
+ tx->nr_descriptors = n;
+ tx->initial_descriptor = lp->tx_next;
+ tx->last_descriptor = lp->tx_next;
+ tx->prev_gso_size = lp->gso_size;
+
+ tx->network_header_len = skb_transport_offset(skb);
+ if (skb_is_gso(skb))
+ tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ struct ring_desc *rd;
+ struct dwceqos_dma_desc *dd;
+ size_t payload_len;
+ dma_addr_t dma_handle;
+
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+ dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+ lp->gso_size = skb_shinfo(skb)->gso_size;
+ }
+
+ dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+ netdev_err(lp->ndev, "TX DMA Mapping error\n");
+ return -ENOMEM;
+ }
+
+ rd = &lp->tx_skb[lp->tx_next];
+ dd = &lp->tx_descs[lp->tx_next];
+
+ rd->skb = NULL;
+ rd->len = skb_headlen(skb);
+ rd->mapping = dma_handle;
+
+ /* Set up DMA Descriptor */
+ dd->des0 = dma_handle;
+
+ if (skb_is_gso(skb)) {
+ payload_len = skb_headlen(skb) - tx->network_header_len;
+
+ if (payload_len)
+ dd->des1 = dma_handle + tx->network_header_len;
+ dd->des2 = tx->network_header_len |
+ DWCEQOS_DMA_DES2_B2L(payload_len);
+ dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+ DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+ (skb->len - tx->network_header_len);
+ } else {
+ dd->des1 = 0;
+ dd->des2 = skb_headlen(skb);
+ dd->des3 = skb->len;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_PARTIAL:
+ dd->des3 |= DWCEQOS_DMA_TDES3_CA;
+ break;
+ case CHECKSUM_NONE:
+ case CHECKSUM_UNNECESSARY:
+ case CHECKSUM_COMPLETE:
+ default:
+ break;
+ }
+ }
+
+ dd->des3 |= DWCEQOS_DMA_TDES3_FD;
+ if (lp->tx_next != tx->initial_descriptor)
+ dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ tx->last_descriptor = lp->tx_next;
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+ return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ struct ring_desc *rd = NULL;
+ struct dwceqos_dma_desc *dd;
+ dma_addr_t dma_handle;
+ size_t i;
+
+ /* Set up additional ring and DMA descriptors if the packet is fragmented */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ size_t frag_size;
+ size_t consumed_size;
+
+ /* Map DMA Area */
+ dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+ netdev_err(lp->ndev, "DMA Mapping error\n");
+ return -ENOMEM;
+ }
+
+ /* order-3 fragments span more than one descriptor. */
+ frag_size = skb_frag_size(frag);
+ consumed_size = 0;
+ while (consumed_size < frag_size) {
+ size_t dma_size = min_t(size_t, 16376,
+ frag_size - consumed_size);
+
+ rd = &lp->tx_skb[lp->tx_next];
+ memset(rd, 0, sizeof(*rd));
+
+ dd = &lp->tx_descs[lp->tx_next];
+
+ /* Set DMA Descriptor fields */
+ dd->des0 = dma_handle + consumed_size;
+ dd->des1 = 0;
+ dd->des2 = dma_size;
+
+ if (skb_is_gso(skb))
+ dd->des3 = (skb->len - tx->network_header_len);
+ else
+ dd->des3 = skb->len;
+
+ dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ tx->last_descriptor = lp->tx_next;
+ lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+ consumed_size += dma_size;
+ }
+
+ rd->len = skb_frag_size(frag);
+ rd->mapping = dma_handle;
+ }
+
+ return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+ struct dwceqos_tx *tx)
+{
+ lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+ lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+ lp->tx_skb[tx->last_descriptor].skb = skb;
+
+ /* Make all descriptor updates visible to the DMA before setting the
+ * owner bit.
+ */
+ wmb();
+
+ lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+ /* Make the owner bit visible before TX wakeup. */
+ wmb();
+
+ dwceqos_tx_poll_demand(lp);
+}
+
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+ size_t i = tx->initial_descriptor;
+
+ while (i != lp->tx_next) {
+ if (lp->tx_skb[i].mapping)
+ dma_unmap_single(lp->ndev->dev.parent,
+ lp->tx_skb[i].mapping,
+ lp->tx_skb[i].len,
+ DMA_TO_DEVICE);
+
+ lp->tx_skb[i].mapping = 0;
+ lp->tx_skb[i].skb = NULL;
+
+ memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+ i = (i + 1) % DWCEQOS_TX_DCNT;
+ }
+
+ lp->tx_next = tx->initial_descriptor;
+ lp->gso_size = tx->prev_gso_size;
+}
+
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct dwceqos_tx trans;
+ int err;
+
+ dwceqos_tx_prepare(skb, lp, &trans);
+ if (lp->tx_free < trans.nr_descriptors) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ err = dwceqos_tx_linear(skb, lp, &trans);
+ if (err)
+ goto tx_error;
+
+ err = dwceqos_tx_frags(skb, lp, &trans);
+ if (err)
+ goto tx_error;
+
+ WARN_ON(lp->tx_next !=
+ ((trans.initial_descriptor + trans.nr_descriptors) %
+ DWCEQOS_TX_DCNT));
+
+ dwceqos_tx_finalize(skb, lp, &trans);
+
+ netdev_sent_queue(ndev, skb->len);
+
+ spin_lock_bh(&lp->tx_lock);
+ lp->tx_free -= trans.nr_descriptors;
+ spin_unlock_bh(&lp->tx_lock);
+
+ ndev->trans_start = jiffies;
+ return NETDEV_TX_OK;
+
+tx_error:
+ dwceqos_tx_rollback(lp, &trans);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(hwaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+ return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned long data;
+
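+ /* Packing illustration for address 00:11:22:33:44:55:
+ * ADDR_HIGH = 0x5544 | EN, ADDR_LOW = 0x33221100.
+ */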
+ data = (addr[5] << 8) | addr[4];
+ dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+ data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+ /* Do not disable MAC address 0 */
+ if (reg_n != 0)
+ dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval = 0;
+ u32 mc_filter[2];
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+ unsigned int max_mac_addr;
+
+ max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+ if (ndev->flags & IFF_PROMISC) {
+ regval = DWCEQOS_MAC_PKT_FILT_PR;
+ } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
+ (ndev->flags & IFF_ALLMULTI))) {
+ regval = DWCEQOS_MAC_PKT_FILT_PM;
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
+ } else if (!netdev_mc_empty(ndev)) {
+ regval = DWCEQOS_MAC_PKT_FILT_HMC;
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table
+ */
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+ /* The most significant bit determines the register
+ * to use (H/L) while the other 5 bits determine
+ * the bit within the register.
+ */
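+ /* Illustration: bit_nr = 35 selects mc_filter[1] (35 >> 5)
+ * and bit 3 within it (35 & 31), i.e. the HI hash register.
+ */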
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
+ dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
+ }
+ if (netdev_uc_count(ndev) > max_mac_addr) {
+ regval |= DWCEQOS_MAC_PKT_FILT_PR;
+ } else {
+ netdev_for_each_uc_addr(ha, ndev) {
+ dwceqos_set_umac_addr(lp, ha->addr, reg);
+ reg++;
+ }
+ for (; reg < max_mac_addr; reg++)
+ dwceqos_disable_umac_addr(lp, reg);
+ }
+ dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+ disable_irq(ndev->irq);
+ dwceqos_interrupt(ndev->irq, ndev);
+ enable_irq(ndev->irq);
+}
+#endif
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+ u32 tx_mask)
+{
+ if (tx_mask & BIT(27))
+ lp->mmc_counters.txlpitranscntr +=
+ dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+ if (tx_mask & BIT(26))
+ lp->mmc_counters.txpiuscntr +=
+ dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+ if (tx_mask & BIT(25))
+ lp->mmc_counters.txoversize_g +=
+ dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+ if (tx_mask & BIT(24))
+ lp->mmc_counters.txvlanpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+ if (tx_mask & BIT(23))
+ lp->mmc_counters.txpausepackets +=
+ dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+ if (tx_mask & BIT(22))
+ lp->mmc_counters.txexcessdef +=
+ dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+ if (tx_mask & BIT(21))
+ lp->mmc_counters.txpacketcount_g +=
+ dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+ if (tx_mask & BIT(20))
+ lp->mmc_counters.txoctetcount_g +=
+ dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+ if (tx_mask & BIT(19))
+ lp->mmc_counters.txcarriererror +=
+ dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+ if (tx_mask & BIT(18))
+ lp->mmc_counters.txexcesscol +=
+ dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+ if (tx_mask & BIT(17))
+ lp->mmc_counters.txlatecol +=
+ dwceqos_read(lp, DWC_MMC_TXLATECOL);
+ if (tx_mask & BIT(16))
+ lp->mmc_counters.txdeferred +=
+ dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+ if (tx_mask & BIT(15))
+ lp->mmc_counters.txmulticol_g +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+ if (tx_mask & BIT(14))
+ lp->mmc_counters.txsinglecol_g +=
+ dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+ if (tx_mask & BIT(13))
+ lp->mmc_counters.txunderflowerror +=
+ dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+ if (tx_mask & BIT(12))
+ lp->mmc_counters.txbroadcastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+ if (tx_mask & BIT(11))
+ lp->mmc_counters.txmulticastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+ if (tx_mask & BIT(10))
+ lp->mmc_counters.txunicastpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+ if (tx_mask & BIT(9))
+ lp->mmc_counters.tx1024tomaxoctets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+ if (tx_mask & BIT(8))
+ lp->mmc_counters.tx512to1023octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+ if (tx_mask & BIT(7))
+ lp->mmc_counters.tx256to511octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+ if (tx_mask & BIT(6))
+ lp->mmc_counters.tx128to255octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+ if (tx_mask & BIT(5))
+ lp->mmc_counters.tx65to127octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+ if (tx_mask & BIT(4))
+ lp->mmc_counters.tx64octets_gb +=
+ dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+ if (tx_mask & BIT(3))
+ lp->mmc_counters.txmulticastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+ if (tx_mask & BIT(2))
+ lp->mmc_counters.txbroadcastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+ if (tx_mask & BIT(1))
+ lp->mmc_counters.txpacketcount_gb +=
+ dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+ if (tx_mask & BIT(0))
+ lp->mmc_counters.txoctetcount_gb +=
+ dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+ if (rx_mask & BIT(27))
+ lp->mmc_counters.rxlpitranscntr +=
+ dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+ if (rx_mask & BIT(26))
+ lp->mmc_counters.rxlpiuscntr +=
+ dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+ if (rx_mask & BIT(25))
+ lp->mmc_counters.rxctrlpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+ if (rx_mask & BIT(24))
+ lp->mmc_counters.rxrcverror +=
+ dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+ if (rx_mask & BIT(23))
+ lp->mmc_counters.rxwatchdog +=
+ dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+ if (rx_mask & BIT(22))
+ lp->mmc_counters.rxvlanpackets_gb +=
+ dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+ if (rx_mask & BIT(21))
+ lp->mmc_counters.rxfifooverflow +=
+ dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+ if (rx_mask & BIT(20))
+ lp->mmc_counters.rxpausepackets +=
+ dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+ if (rx_mask & BIT(19))
+ lp->mmc_counters.rxoutofrangetype +=
+ dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+ if (rx_mask & BIT(18))
+ lp->mmc_counters.rxlengtherror +=
+ dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+ if (rx_mask & BIT(17))
+ lp->mmc_counters.rxunicastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+ if (rx_mask & BIT(16))
+ lp->mmc_counters.rx1024tomaxoctets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+ if (rx_mask & BIT(15))
+ lp->mmc_counters.rx512to1023octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+ if (rx_mask & BIT(14))
+ lp->mmc_counters.rx256to511octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+ if (rx_mask & BIT(13))
+ lp->mmc_counters.rx128to255octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+ if (rx_mask & BIT(12))
+ lp->mmc_counters.rx65to127octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+ if (rx_mask & BIT(11))
+ lp->mmc_counters.rx64octets_gb +=
+ dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+ if (rx_mask & BIT(10))
+ lp->mmc_counters.rxoversize_g +=
+ dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+ if (rx_mask & BIT(9))
+ lp->mmc_counters.rxundersize_g +=
+ dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+ if (rx_mask & BIT(8))
+ lp->mmc_counters.rxjabbererror +=
+ dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+ if (rx_mask & BIT(7))
+ lp->mmc_counters.rxrunterror +=
+ dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+ if (rx_mask & BIT(6))
+ lp->mmc_counters.rxalignmenterror +=
+ dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+ if (rx_mask & BIT(5))
+ lp->mmc_counters.rxcrcerror +=
+ dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+ if (rx_mask & BIT(4))
+ lp->mmc_counters.rxmulticastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+ if (rx_mask & BIT(3))
+ lp->mmc_counters.rxbroadcastpackets_g +=
+ dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+ if (rx_mask & BIT(2))
+ lp->mmc_counters.rxoctetcount_g +=
+ dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+ if (rx_mask & BIT(1))
+ lp->mmc_counters.rxoctetcount_gb +=
+ dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+ if (rx_mask & BIT(0))
+ lp->mmc_counters.rxpacketcount_gb +=
+ dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
+
+static struct rtnl_link_stats64*
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+ unsigned long flags;
+ struct net_local *lp = netdev_priv(ndev);
+ struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+ dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+ lp->mmc_tx_counters_mask);
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+ s->rx_packets = hwstats->rxpacketcount_gb;
+ s->rx_bytes = hwstats->rxoctetcount_gb;
+ s->rx_errors = hwstats->rxpacketcount_gb -
+ hwstats->rxbroadcastpackets_g -
+ hwstats->rxmulticastpackets_g -
+ hwstats->rxunicastpackets_g;
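+ /* The _g counters only cover error-free frames (_gb counts good and
+ * bad), so subtracting the good unicast/multicast/broadcast counts
+ * from the total leaves the errored frames - an inference from the
+ * MMC counter naming.
+ */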
+ s->multicast = hwstats->rxmulticastpackets_g;
+ s->rx_length_errors = hwstats->rxlengtherror;
+ s->rx_crc_errors = hwstats->rxcrcerror;
+ s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+ s->tx_packets = hwstats->txpacketcount_gb;
+ s->tx_bytes = hwstats->txoctetcount_gb;
+
+ if (lp->mmc_tx_counters_mask & BIT(21))
+ s->tx_errors = hwstats->txpacketcount_gb -
+ hwstats->txpacketcount_g;
+ else
+ s->tx_errors = hwstats->txunderflowerror +
+ hwstats->txcarriererror;
+
+ return s;
+}
+
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = lp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ strlcpy(ed->driver, lp->pdev->dev.driver->name, sizeof(ed->driver));
+ strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ pp->autoneg = lp->flowcontrol.autoneg;
+ pp->tx_pause = lp->flowcontrol.tx;
+ pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pp)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ int ret = 0;
+
+ lp->flowcontrol.autoneg = pp->autoneg;
+ if (pp->autoneg) {
+ lp->phy_dev->advertising |= ADVERTISED_Pause;
+ lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+ } else {
+ lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+ lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+ lp->flowcontrol.rx = pp->rx_pause;
+ lp->flowcontrol.tx = pp->tx_pause;
+ }
+
+ if (netif_running(ndev))
+ ret = phy_start_aneg(lp->phy_dev);
+
+ return ret;
+}
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+ u8 *data)
+{
+ size_t i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+ memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ unsigned long flags;
+ size_t i;
+ u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+ spin_lock_irqsave(&lp->stats_lock, flags);
+ dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+ lp->mmc_tx_counters_mask);
+ spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+ memcpy(data,
+ mmcstat + dwceqos_ethtool_stats[i].offset,
+ sizeof(u64));
+ data++;
+ }
+}
+
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+ return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *space)
+{
+ const struct net_local *lp = netdev_priv(dev);
+ u32 *reg_space = (u32 *)space;
+ int reg_offset;
+ int reg_ix = 0;
+
+ /* MAC registers */
+ for (reg_offset = START_MAC_REG_OFFSET;
+ reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+ /* MTL registers */
+ for (reg_offset = START_MTL_REG_OFFSET;
+ reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+
+ /* DMA registers */
+ for (reg_offset = START_DMA_REG_OFFSET;
+ reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+ reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+ reg_ix++;
+ }
+
+ BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+ return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+ return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 lpi_status;
+ u32 lpi_enabled;
+
+ if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+ return -EOPNOTSUPP;
+
+ edata->eee_active = lp->eee_active;
+ edata->eee_enabled = lp->eee_enabled;
+ edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+ lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+ edata->tx_lpi_enabled = lpi_enabled;
+
+ if (netif_msg_hw(lp)) {
+ u32 regval;
+
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+ netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+ dwceqos_get_rx_lpi_state(regval),
+ dwceqos_get_tx_lpi_state(regval));
+ }
+
+ return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ u32 regval;
+ unsigned long flags;
+
+ if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+ return -EOPNOTSUPP;
+
+ if (edata->eee_enabled && !lp->eee_active)
+ return -EOPNOTSUPP;
+
+ if (edata->tx_lpi_enabled) {
+ if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+ edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+ return -EINVAL;
+ }
+
+ lp->eee_enabled = edata->eee_enabled;
+
+ if (edata->eee_enabled && edata->tx_lpi_enabled) {
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+ edata->tx_lpi_timer);
+
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ if (lp->en_tx_lpi_clockgating)
+ regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ } else {
+ spin_lock_irqsave(&lp->hw_lock, flags);
+ regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+ regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+ dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+ spin_unlock_irqrestore(&lp->hw_lock, flags);
+ }
+
+ return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+ const struct net_local *lp = netdev_priv(ndev);
+
+ return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+ struct net_local *lp = netdev_priv(ndev);
+
+ lp->msg_enable = msglevel;
+}
+
+static const struct ethtool_ops dwceqos_ethtool_ops = {
+ .get_settings = dwceqos_get_settings,
+ .set_settings = dwceqos_set_settings,
+ .get_drvinfo = dwceqos_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = dwceqos_get_pauseparam,
+ .set_pauseparam = dwceqos_set_pauseparam,
+ .get_strings = dwceqos_get_strings,
+ .get_ethtool_stats = dwceqos_get_ethtool_stats,
+ .get_sset_count = dwceqos_get_sset_count,
+ .get_regs = dwceqos_get_regs,
+ .get_regs_len = dwceqos_get_regs_len,
+ .get_eee = dwceqos_get_eee,
+ .set_eee = dwceqos_set_eee,
+ .get_msglevel = dwceqos_get_msglevel,
+ .set_msglevel = dwceqos_set_msglevel,
+};
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = dwceqos_open,
+ .ndo_stop = dwceqos_stop,
+ .ndo_start_xmit = dwceqos_start_xmit,
+ .ndo_set_rx_mode = dwceqos_set_rx_mode,
+ .ndo_set_mac_address = dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = dwceqos_poll_controller,
+#endif
+ .ndo_do_ioctl = dwceqos_ioctl,
+ .ndo_tx_timeout = dwceqos_tx_timeout,
+ .ndo_get_stats64 = dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem = NULL;
+ struct net_device *ndev;
+ struct net_local *lp;
+ int ret = -ENXIO;
+
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r_mem) {
+ dev_err(&pdev->dev, "no IO resource defined.\n");
+ return -ENXIO;
+ }
+
+ ndev = alloc_etherdev(sizeof(*lp));
+ if (!ndev) {
+ dev_err(&pdev->dev, "etherdev allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->pdev = pdev;
+ lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+ spin_lock_init(&lp->tx_lock);
+ spin_lock_init(&lp->hw_lock);
+ spin_lock_init(&lp->stats_lock);
+
+ lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(lp->apb_pclk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ ret = PTR_ERR(lp->apb_pclk);
+ goto err_out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(lp->apb_pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+ goto err_out_free_netdev;
+ }
+
+ lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->baseaddr)) {
+ dev_err(&pdev->dev, "failed to map baseaddress.\n");
+ ret = PTR_ERR(lp->baseaddr);
+ goto err_out_clk_dis_aper;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+ ndev->netdev_ops = &netdev_ops;
+ ndev->ethtool_ops = &dwceqos_ethtool_ops;
+ ndev->base_addr = r_mem->start;
+
+ dwceqos_get_hwfeatures(lp);
+ dwceqos_mdio_set_csr(lp);
+
+ ndev->hw_features = NETIF_F_SG;
+
+ if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+ ndev->hw_features |= NETIF_F_RXCSUM;
+
+ ndev->features = ndev->hw_features;
+
+ netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clk_dis_aper;
+ }
+
+ lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(lp->phy_ref_clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ ret = PTR_ERR(lp->phy_ref_clk);
+ goto err_out_unregister_netdev;
+ }
+
+ ret = clk_prepare_enable(lp->phy_ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto err_out_unregister_netdev;
+ }
+
+ lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+ "phy-handle", 0);
+ if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+ ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "invalid fixed-link");
+ goto err_out_unregister_netdev;
+ }
+
+ lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+ }
+
+ ret = of_get_phy_mode(lp->pdev->dev.of_node);
+ if (ret < 0) {
+ dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ lp->phy_interface = ret;
+
+ ret = dwceqos_mii_init(lp);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+ goto err_out_unregister_clk_notifier;
+ }
+
+ ret = dwceqos_mii_probe(ndev);
+ if (ret != 0) {
+ netdev_err(ndev, "mii_probe fail.\n");
+ ret = -ENXIO;
+ goto err_out_unregister_clk_notifier;
+ }
+
+ dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+ tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+ (unsigned long)ndev);
+ tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+ lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+ INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+ platform_set_drvdata(pdev, ndev);
+ ret = dwceqos_probe_config_dt(pdev);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+ ret);
+ goto err_out_unregister_clk_notifier;
+ }
+ dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+ pdev->id, ndev->base_addr, ndev->irq);
+
+ ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+ ndev->irq, ret);
+ goto err_out_unregister_clk_notifier;
+ }
+
+ if (netif_msg_probe(lp))
+ netdev_dbg(ndev, "net_local@%p\n", lp);
+
+ return 0;
+
+err_out_unregister_clk_notifier:
+ clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+ unregister_netdev(ndev);
+err_out_clk_dis_aper:
+ clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_local *lp;
+
+ if (ndev) {
+ lp = netdev_priv(ndev);
+
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+
+ unregister_netdev(ndev);
+
+ clk_disable_unprepare(lp->phy_ref_clk);
+ clk_disable_unprepare(lp->apb_pclk);
+
+ free_netdev(ndev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+ .probe = dwceqos_probe,
+ .remove = dwceqos_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dwceq_of_match,
+ },
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index a9cac8413e49..14c9d1baa85c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2182,11 +2182,6 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
-
- drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
- drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
}
/*
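These drvinfo assignments are being removed tree-wide because the ethtool core derives the same counts from the driver's callbacks. As a hedged sketch of where n_stats comes from instead, a driver reports its statistics count through get_sset_count(); foo_stat_names is a hypothetical string table:

	/* Sketch: the ethtool core fills drvinfo->n_stats from this
	 * callback, so assigning it by hand in get_drvinfo is redundant.
	 */
	static int foo_get_sset_count(struct net_device *dev, int sset)
	{
		switch (sset) {
		case ETH_SS_STATS:
			return ARRAY_SIZE(foo_stat_names);	/* hypothetical table */
		default:
			return -EOPNOTSUPP;
		}
	}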
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index dd9430043536..77d26fe286c0 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -41,6 +41,8 @@
#include <linux/gpio.h>
#include <linux/atomic.h>
+#include <asm/mach-ar7/ar7.h>
+
MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
@@ -897,7 +899,6 @@ static void cpmac_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, "cpmac", sizeof(info->driver));
strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
- info->regdump_len = 0;
}
static const struct ethtool_ops cpmac_ethtool_ops = {
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index f59509486113..c08be62bceba 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -19,11 +19,38 @@
#include "cpsw.h"
-#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
-#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
+#define CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
-int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
- u8 *mac_addr)
+static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
+ int slave, u8 *mac_addr)
+{
+ u32 macid_lsb;
+ u32 macid_msb;
+ struct regmap *syscon;
+
+ syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lsb);
+ regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_msb);
+
+ mac_addr[0] = (macid_msb >> 16) & 0xff;
+ mac_addr[1] = (macid_msb >> 8) & 0xff;
+ mac_addr[2] = macid_msb & 0xff;
+ mac_addr[3] = (macid_lsb >> 16) & 0xff;
+ mac_addr[4] = (macid_lsb >> 8) & 0xff;
+ mac_addr[5] = macid_lsb & 0xff;
+
+ return 0;
+}
+
+static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+ u8 *mac_addr)
{
u32 macid_lo;
u32 macid_hi;
@@ -36,10 +63,8 @@ int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
return PTR_ERR(syscon);
}
- regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
- &macid_lo);
- regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
- &macid_hi);
+ regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lo);
+ regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_hi);
mac_addr[5] = (macid_lo >> 8) & 0xff;
mac_addr[4] = macid_lo & 0xff;
@@ -50,6 +75,27 @@ int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
+
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
+{
+ if (of_machine_is_compatible("ti,am33xx"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_device_is_compatible(dev->of_node, "ti,am3517-emac"))
+ return davinci_emac_3517_get_macid(dev, 0x110, slave, mac_addr);
+
+ if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,am4372"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,dra7"))
+ return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
+
+ dev_err(dev, "incompatible machine/device type for reading mac address\n");
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ti_cm_get_macid);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 0ea78326cc21..e9cc61e1ec74 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -2,6 +2,8 @@
*
* Copyright (C) 2013 Texas Instruments
*
+ * Module Author: Mugunthan V N <mugunthanvnm@ti.com>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@@ -13,7 +15,7 @@
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
@@ -173,7 +175,6 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
},
{}
};
-MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
static int cpsw_phy_sel_probe(struct platform_device *pdev)
{
@@ -214,7 +215,4 @@ static struct platform_driver cpsw_phy_sel_driver = {
.of_match_table = cpsw_phy_sel_id_table,
},
};
-
-module_platform_driver(cpsw_phy_sel_driver);
-MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(cpsw_phy_sel_driver);
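Since the phy-sel code can no longer be built as a module, the conversion to builtin_platform_driver() drops the OF device table export, the exit path, and the MODULE_* boilerplate. Hedging on the exact macro expansion in this kernel version, it boils down roughly to:

	/* Rough equivalent of builtin_platform_driver(cpsw_phy_sel_driver):
	 * an initcall with no matching module_exit(), since the driver is
	 * now always built in.
	 */
	static int __init cpsw_phy_sel_driver_init(void)
	{
		return platform_driver_register(&cpsw_phy_sel_driver);
	}
	device_initcall(cpsw_phy_sel_driver_init);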
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d155bf2573cd..040fbc1e5508 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -29,7 +29,9 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
@@ -365,7 +367,9 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct napi_struct napi;
+ struct device_node *phy_node;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
struct device *dev;
struct cpsw_platform_data data;
struct cpsw_ss_regs __iomem *regs;
@@ -386,10 +390,12 @@ struct cpsw_priv {
struct cpsw_ale *ale;
bool rx_pause;
bool tx_pause;
+ bool quirk_irq;
+ bool rx_irq_disabled;
+ bool tx_irq_disabled;
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
- bool irq_enabled;
struct cpts *cpts;
u32 emac_port;
};
@@ -752,13 +758,15 @@ static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ writel(0, &priv->wr_regs->tx_en);
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
- cpdma_chan_process(priv->txch, 128);
- priv = cpsw_get_slave_priv(priv, 1);
- if (priv)
- cpdma_chan_process(priv->txch, 128);
+ if (priv->quirk_irq) {
+ disable_irq_nosync(priv->irqs_table[1]);
+ priv->tx_irq_disabled = true;
+ }
+ napi_schedule(&priv->napi_tx);
return IRQ_HANDLED;
}
@@ -767,43 +775,49 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
struct cpsw_priv *priv = dev_id;
cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ writel(0, &priv->wr_regs->rx_en);
- cpsw_intr_disable(priv);
- if (priv->irq_enabled == true) {
+ if (priv->quirk_irq) {
disable_irq_nosync(priv->irqs_table[0]);
- priv->irq_enabled = false;
+ priv->rx_irq_disabled = true;
}
- if (netif_running(priv->ndev)) {
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
- }
+ napi_schedule(&priv->napi_rx);
+ return IRQ_HANDLED;
+}
- priv = cpsw_get_slave_priv(priv, 1);
- if (!priv)
- return IRQ_NONE;
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_priv *priv = napi_to_priv(napi_tx);
+ int num_tx;
- if (netif_running(priv->ndev)) {
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
+ num_tx = cpdma_chan_process(priv->txch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &priv->wr_regs->tx_en);
+ if (priv->quirk_irq && priv->tx_irq_disabled) {
+ priv->tx_irq_disabled = false;
+ enable_irq(priv->irqs_table[1]);
+ }
}
- return IRQ_NONE;
+
+ if (num_tx)
+ cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
+
+ return num_tx;
}
-static int cpsw_poll(struct napi_struct *napi, int budget)
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct cpsw_priv *priv = napi_to_priv(napi);
+ struct cpsw_priv *priv = napi_to_priv(napi_rx);
int num_rx;
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
- struct cpsw_priv *prim_cpsw;
-
- napi_complete(napi);
- cpsw_intr_enable(priv);
- prim_cpsw = cpsw_get_slave_priv(priv, 0);
- if (prim_cpsw->irq_enabled == false) {
- prim_cpsw->irq_enabled = true;
+ napi_complete(napi_rx);
+ writel(0xff, &priv->wr_regs->rx_en);
+ if (priv->quirk_irq && priv->rx_irq_disabled) {
+ priv->rx_irq_disabled = false;
enable_irq(priv->irqs_table[0]);
}
}
@@ -1134,7 +1148,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
- slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+ if (priv->phy_node)
+ slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ else
+ slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
if (IS_ERR(slave->phy)) {
dev_err(priv->dev, "phy %s not found on slave %d\n",
@@ -1230,7 +1248,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
static int cpsw_ndo_open(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_priv *prim_cpsw;
int i, ret;
u32 reg;
@@ -1260,6 +1277,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
ALE_ALL_PORTS << priv->host_port, 0, 0);
if (!cpsw_common_res_usage_state(priv)) {
+ struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
/* setup tx dma to fixed prio and zero offset */
cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
@@ -1273,6 +1292,19 @@ static int cpsw_ndo_open(struct net_device *ndev)
/* Enable internal fifo flow control */
writel(0x7, &priv->regs->flow_control);
+ napi_enable(&priv_sl0->napi_rx);
+ napi_enable(&priv_sl0->napi_tx);
+
+ if (priv_sl0->tx_irq_disabled) {
+ priv_sl0->tx_irq_disabled = false;
+ enable_irq(priv->irqs_table[1]);
+ }
+
+ if (priv_sl0->rx_irq_disabled) {
+ priv_sl0->rx_irq_disabled = false;
+ enable_irq(priv->irqs_table[0]);
+ }
+
if (WARN_ON(!priv->data.rx_descs))
priv->data.rx_descs = 128;
@@ -1311,18 +1343,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_set_coalesce(ndev, &coal);
}
- napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- prim_cpsw = cpsw_get_slave_priv(priv, 0);
- if (prim_cpsw->irq_enabled == false) {
- if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
- prim_cpsw->irq_enabled = true;
- enable_irq(prim_cpsw->irqs_table[0]);
- }
- }
-
if (priv->data.dual_emac)
priv->slaves[priv->emac_port].open_stat = true;
return 0;
@@ -1341,10 +1364,13 @@ static int cpsw_ndo_stop(struct net_device *ndev)
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
netif_stop_queue(priv->ndev);
- napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
if (cpsw_common_res_usage_state(priv) <= 1) {
+ struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
+ napi_disable(&priv_sl0->napi_rx);
+ napi_disable(&priv_sl0->napi_tx);
cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
cpdma_ctlr_stop(priv->dma);
@@ -1764,7 +1790,6 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
strlcpy(info->driver, "cpsw", sizeof(info->driver));
strlcpy(info->version, "1.0", sizeof(info->version));
strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
- info->regdump_len = cpsw_get_regs_len(ndev);
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -1915,11 +1940,12 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->port_vlan = data->dual_emac_res_vlan;
}
-static int cpsw_probe_dt(struct cpsw_platform_data *data,
+static int cpsw_probe_dt(struct cpsw_priv *priv,
struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
+ struct cpsw_platform_data *data = &priv->data;
int i = 0, ret;
u32 prop;
@@ -2010,6 +2036,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (strcmp(slave_node->name, "slave"))
continue;
+ priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
@@ -2025,7 +2052,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
-
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2038,13 +2064,10 @@ no_phy_slave:
if (mac_addr) {
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
} else {
- if (of_machine_is_compatible("ti,am33xx")) {
- ret = cpsw_am33xx_cm_get_macid(&pdev->dev,
- 0x630, i,
- slave_data->mac_addr);
- if (ret)
- return ret;
- }
+ ret = ti_cm_get_macid(&pdev->dev, i,
+ slave_data->mac_addr);
+ if (ret)
+ return ret;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@@ -2127,7 +2150,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2141,6 +2163,44 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
return ret;
}
+#define CPSW_QUIRK_IRQ BIT(0)
+
+static struct platform_device_id cpsw_devtype[] = {
+ {
+ /* keep it for existing compatibles */
+ .name = "cpsw",
+ .driver_data = CPSW_QUIRK_IRQ,
+ }, {
+ .name = "am335x-cpsw",
+ .driver_data = CPSW_QUIRK_IRQ,
+ }, {
+ .name = "am4372-cpsw",
+ .driver_data = 0,
+ }, {
+ .name = "dra7-cpsw",
+ .driver_data = 0,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, cpsw_devtype);
+
+enum ti_cpsw_type {
+ CPSW = 0,
+ AM335X_CPSW,
+ AM4372_CPSW,
+ DRA7_CPSW,
+};
+
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
+ { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
+ { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
+ { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
static int cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data;
@@ -2150,6 +2210,8 @@ static int cpsw_probe(struct platform_device *pdev)
struct cpsw_ale_params ale_params;
void __iomem *ss_regs;
struct resource *res, *ss_res;
+ const struct of_device_id *of_id;
+ struct gpio_descs *mode;
u32 slave_offset, sliver_offset, slave_size;
int ret = 0, i;
int irq;
@@ -2169,13 +2231,19 @@ static int cpsw_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
- priv->irq_enabled = true;
if (!priv->cpts) {
dev_err(&pdev->dev, "error allocating cpts\n");
ret = -ENOMEM;
goto clean_ndev_ret;
}
+ mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(mode)) {
+ ret = PTR_ERR(mode);
+ dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
+ goto clean_ndev_ret;
+ }
+
/*
* This may be required here for child devices.
*/
@@ -2184,7 +2252,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&priv->data, pdev)) {
+ if (cpsw_probe_dt(priv, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
@@ -2341,6 +2409,13 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
+ of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
+ if (of_id) {
+ pdev->id_entry = of_id->data;
+ if (pdev->id_entry->driver_data)
+ priv->quirk_irq = true;
+ }
+
/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
* MISC IRQs which are always kept disabled with this driver so
* we will not request them.
@@ -2380,7 +2455,8 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2504,12 +2580,6 @@ static int cpsw_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
-static const struct of_device_id cpsw_of_mtable[] = {
- { .compatible = "ti,cpsw", },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
-
static struct platform_driver cpsw_driver = {
.driver = {
.name = "cpsw",
@@ -2520,17 +2590,7 @@ static struct platform_driver cpsw_driver = {
.remove = cpsw_remove,
};
-static int __init cpsw_init(void)
-{
- return platform_driver_register(&cpsw_driver);
-}
-late_initcall(cpsw_init);
-
-static void __exit cpsw_exit(void)
-{
- platform_driver_unregister(&cpsw_driver);
-}
-module_exit(cpsw_exit);
+module_platform_driver(cpsw_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index ca90efafd156..442a7038e660 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -41,7 +41,6 @@ struct cpsw_platform_data {
};
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
-int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
- u8 *mac_addr);
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index aeebc0a7bf47..33bd3b902304 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1861,8 +1861,12 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram");
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!priv->phy_node)
- pdata->phy_id = NULL;
+ if (!priv->phy_node) {
+ if (!of_phy_is_fixed_link(np))
+ pdata->phy_id = NULL;
+ else if (of_phy_register_fixed_link(np) >= 0)
+ priv->phy_node = of_node_get(np);
+ }
auxdata = pdev->dev.platform_data;
if (auxdata) {
@@ -1882,51 +1886,13 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
return pdata;
}
-static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
- int slave, u8 *mac_addr)
-{
- u32 macid_lsb;
- u32 macid_msb;
- struct regmap *syscon;
-
- syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
- if (IS_ERR(syscon)) {
- if (PTR_ERR(syscon) == -ENODEV)
- return 0;
- return PTR_ERR(syscon);
- }
-
- regmap_read(syscon, offset, &macid_lsb);
- regmap_read(syscon, offset + 4, &macid_msb);
-
- mac_addr[0] = (macid_msb >> 16) & 0xff;
- mac_addr[1] = (macid_msb >> 8) & 0xff;
- mac_addr[2] = macid_msb & 0xff;
- mac_addr[3] = (macid_lsb >> 16) & 0xff;
- mac_addr[4] = (macid_lsb >> 8) & 0xff;
- mac_addr[5] = macid_lsb & 0xff;
-
- return 0;
-}
-
static int davinci_emac_try_get_mac(struct platform_device *pdev,
int instance, u8 *mac_addr)
{
- int error = -EINVAL;
-
if (!pdev->dev.of_node)
- return error;
-
- if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac"))
- error = davinci_emac_3517_get_macid(&pdev->dev, 0x110,
- 0, mac_addr);
- else if (of_device_is_compatible(pdev->dev.of_node,
- "ti,dm816-emac"))
- error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30,
- instance,
- mac_addr);
-
- return error;
+ return -EINVAL;
+
+ return ti_cm_get_macid(&pdev->dev, instance, mac_addr);
}
/**
@@ -2004,8 +1970,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (res_ctrl) {
priv->ctrl_base =
devm_ioremap_resource(&pdev->dev, res_ctrl);
- if (IS_ERR(priv->ctrl_base))
+ if (IS_ERR(priv->ctrl_base)) {
+ rc = PTR_ERR(priv->ctrl_base);
goto no_pdata;
+ }
} else {
priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
}
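The davinci_emac change adopts the common fixed-link fallback: when a node has no phy-handle but carries a fixed-link subnode, a software PHY is registered and the MAC node itself serves as the PHY node. A trimmed sketch, assuming np is the MAC's of_node and omitting error handling:

	struct device_node *phy_node;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		/* Registers an emulated PHY driven by the fixed-link subnode */
		if (of_phy_register_fixed_link(np) >= 0)
			phy_node = of_node_get(np);	/* MAC node doubles as PHY node */
	}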
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index a8a730641bbb..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
struct list_head rxhook_list_head;
unsigned int rx_queue_id;
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
- u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 734a6c435840..37b9b39192ec 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR 16
@@ -51,6 +52,8 @@
NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
NETIF_MSG_RX_STATUS)
+#define NETCP_EFUSE_ADDR_SWAP 2
+
#define knav_queue_get_id(q) knav_queue_device_control(q, \
KNAV_QUEUE_GET_ID, (unsigned long)NULL)
@@ -172,13 +175,22 @@ static void set_words(u32 *words, int num_words, u32 *desc)
}
/* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
unsigned int addr0, addr1;
addr1 = readl(efuse_mac + 4);
addr0 = readl(efuse_mac);
+ switch (swap) {
+ case NETCP_EFUSE_ADDR_SWAP:
+ addr0 = addr1;
+ addr1 = readl(efuse_mac);
+ break;
+ default:
+ break;
+ }
+
x[0] = (addr1 & 0x0000ff00) >> 8;
x[1] = addr1 & 0x000000ff;
x[2] = (addr0 & 0xff000000) >> 24;
@@ -279,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
interface_list) {
struct netcp_intf_modpriv *intf_modpriv;
- /* If interface not registered then register now */
- if (!netcp_intf->netdev_registered)
- ret = netcp_register_interface(netcp_intf);
-
- if (ret)
- return -ENODEV;
-
intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
GFP_KERNEL);
if (!intf_modpriv)
@@ -294,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
interface = of_parse_phandle(netcp_intf->node_interface,
module->name, 0);
+ if (!interface) {
+ devm_kfree(dev, intf_modpriv);
+ continue;
+ }
+
intf_modpriv->netcp_priv = netcp_intf;
intf_modpriv->netcp_module = module;
list_add_tail(&intf_modpriv->intf_list,
@@ -311,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
continue;
}
}
+
+ /* Now register the interface with netdev */
+ list_for_each_entry(netcp_intf,
+ &netcp_device->interface_head,
+ interface_list) {
+ /* If interface not registered then register now */
+ if (!netcp_intf->netdev_registered) {
+ ret = netcp_register_interface(netcp_intf);
+ if (ret)
+ return -ENODEV;
+ }
+ }
return 0;
}
@@ -345,7 +367,6 @@ int netcp_register_module(struct netcp_module *module)
if (ret < 0)
goto fail;
}
-
mutex_unlock(&netcp_modules_lock);
return 0;
@@ -784,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp)
netcp->rx_pool = NULL;
}
-static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
+static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
struct knav_dma_desc *hwdesc;
unsigned int buf_len, dma_sz;
@@ -798,36 +819,34 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
hwdesc = knav_pool_desc_get(netcp->rx_pool);
if (IS_ERR_OR_NULL(hwdesc)) {
dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
- return;
+ return -ENOMEM;
}
if (likely(fdq == 0)) {
unsigned int primary_buf_len;
/* Allocate a primary receive queue entry */
- buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+ buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
primary_buf_len = SKB_DATA_ALIGN(buf_len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (primary_buf_len <= PAGE_SIZE) {
- bufptr = netdev_alloc_frag(primary_buf_len);
- pad[1] = primary_buf_len;
- } else {
- bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
- GFP_DMA32 | __GFP_COLD);
- pad[1] = 0;
- }
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ pad[1] = primary_buf_len;
if (unlikely(!bufptr)) {
- dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+ dev_warn_ratelimited(netcp->ndev_dev,
+ "Primary RX buffer alloc failed\n");
goto fail;
}
dma = dma_map_single(netcp->dev, bufptr, buf_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(netcp->dev, dma)))
+ goto fail;
+
pad[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
@@ -852,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
&dma_sz);
knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
- return;
+ return 0;
fail:
knav_pool_desc_put(netcp->rx_pool, hwdesc);
+ return -ENOMEM;
}
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
- int i;
+ int i, ret = 0;
/* Calculate the FDQ deficit and refill */
for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
fdq_deficit[i] = netcp->rx_queue_depths[i] -
knav_queue_get_count(netcp->rx_fdq[i]);
- while (fdq_deficit[i]--)
- netcp_allocate_rx_buf(netcp, i);
+ while (fdq_deficit[i]-- && !ret)
+ ret = netcp_allocate_rx_buf(netcp, i);
} /* end for fdqs */
}
@@ -883,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
packets = netcp_process_rx_packets(netcp, budget);
+ netcp_rxpool_refill(netcp);
if (packets < budget) {
napi_complete(&netcp->rx_napi);
knav_queue_enable_notify(netcp->rx_queue);
}
- netcp_rxpool_refill(netcp);
return packets;
}
@@ -1010,7 +1030,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
/* Map the linear buffer */
dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (unlikely(!dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
return NULL;
}
@@ -1374,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
continue;
dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
naddr->addr, naddr->type);
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, priv) {
module = priv->netcp_module;
if (!module->del_addr)
@@ -1383,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
naddr);
WARN_ON(error);
}
- mutex_unlock(&netcp_modules_lock);
netcp_addr_del(netcp, naddr);
}
}
@@ -1400,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
continue;
dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
naddr->addr, naddr->type);
- mutex_lock(&netcp_modules_lock);
+
for_each_module(netcp, priv) {
module = priv->netcp_module;
if (!module->add_addr)
@@ -1408,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
error = module->add_addr(priv->module_priv, naddr);
WARN_ON(error);
}
- mutex_unlock(&netcp_modules_lock);
}
}
@@ -1422,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
ndev->flags & IFF_ALLMULTI ||
netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
+ spin_lock(&netcp->lock);
/* first clear all marks */
netcp_addr_clear_mark(netcp);
@@ -1440,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
/* finally sweep and callout into modules */
netcp_addr_sweep_del(netcp);
netcp_addr_sweep_add(netcp);
+ spin_unlock(&netcp->lock);
}
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
@@ -1546,8 +1565,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
knav_queue_disable_notify(netcp->rx_queue);
/* open Rx FDQs */
- for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
- netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+ ++i) {
snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1604,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev)
goto fail;
}
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->open) {
@@ -1615,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev)
}
}
}
- mutex_unlock(&netcp_modules_lock);
napi_enable(&netcp->rx_napi);
napi_enable(&netcp->tx_napi);
@@ -1632,7 +1649,6 @@ fail_open:
if (module->close)
module->close(intf_modpriv->module_priv, ndev);
}
- mutex_unlock(&netcp_modules_lock);
fail:
netcp_free_navigator_resources(netcp);
@@ -1656,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
napi_disable(&netcp->rx_napi);
napi_disable(&netcp->tx_napi);
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->close) {
@@ -1665,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
dev_err(netcp->ndev_dev, "Close failed\n");
}
}
- mutex_unlock(&netcp_modules_lock);
/* Recycle Rx descriptors from completion queue */
netcp_empty_rx_queue(netcp);
@@ -1693,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (!module->ioctl)
@@ -1709,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
}
out:
- mutex_unlock(&netcp_modules_lock);
return (ret == 0) ? 0 : err;
}
@@ -1744,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_intf_modpriv *intf_modpriv;
struct netcp_module *module;
+ unsigned long flags;
int err = 0;
dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
- mutex_lock(&netcp_modules_lock);
+ spin_lock_irqsave(&netcp->lock, flags);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if ((module->add_vid) && (vid != 0)) {
@@ -1760,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
}
}
}
- mutex_unlock(&netcp_modules_lock);
+ spin_unlock_irqrestore(&netcp->lock, flags);
+
return err;
}
@@ -1769,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_intf_modpriv *intf_modpriv;
struct netcp_module *module;
+ unsigned long flags;
int err = 0;
dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
- mutex_lock(&netcp_modules_lock);
+ spin_lock_irqsave(&netcp->lock, flags);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->del_vid) {
@@ -1785,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
}
}
}
- mutex_unlock(&netcp_modules_lock);
+ spin_unlock_irqrestore(&netcp->lock, flags);
return err;
}
@@ -1902,7 +1917,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
goto quit;
}
- emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+ emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
if (is_valid_ether_addr(efuse_mac_addr))
ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
else
@@ -1941,14 +1956,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
netcp->rx_queue_depths[0] = 128;
}
- ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
- netcp->rx_buffer_sizes,
- KNAV_DMA_FDQ_PER_CHAN);
- if (ret) {
- dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
- netcp->rx_buffer_sizes[0] = 1536;
- }
-
ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
if (ret < 0) {
dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2038,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev)
struct device_node *child, *interfaces;
struct netcp_device *netcp_device;
struct device *dev = &pdev->dev;
- struct netcp_module *module;
int ret;
if (!node) {
@@ -2085,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev)
/* Add the device instance to the list */
list_add_tail(&netcp_device->device_list, &netcp_devices);
- /* Probe & attach any modules already registered */
- mutex_lock(&netcp_modules_lock);
- for_each_netcp_module(module) {
- ret = netcp_module_probe(netcp_device, module);
- if (ret < 0)
- dev_err(dev, "module(%s) probe failed\n", module->name);
- }
- mutex_unlock(&netcp_modules_lock);
return 0;
probe_quit_interface:
@@ -2150,7 +2148,6 @@ MODULE_DEVICE_TABLE(of, of_match);
static struct platform_driver netcp_driver = {
.driver = {
.name = "netcp-1.0",
- .owner = THIS_MODULE,
.of_match_table = of_match,
},
.probe = netcp_probe,
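Two of the netcp fixes above replace NULL checks on DMA handles with dma_mapping_error(); a handle of zero can be a perfectly valid bus address, so only the API can say whether the mapping failed. Minimal sketch of the correct pattern:

	/* Correct check for a streaming DMA mapping; a zero handle is not
	 * a reliable failure indicator.
	 */
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma)) {
		dev_err(dev, "DMA mapping failed\n");
		return -ENOMEM;	/* or unwind as appropriate */
	}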
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 1974a8ae764a..4e70e7586a09 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -77,6 +77,7 @@
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_NUM_ALE_ENTRIES 1024
+#define GBENU_SGMII_MODULE_SIZE 0x100
/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
@@ -149,8 +150,8 @@
#define XGBE_STATS2_MODULE 2
/* s: 0-based slave_port */
-#define SGMII_BASE(s) \
- (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
+#define SGMII_BASE(d, s) \
+ (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
#define GBE_TX_QUEUE 648
#define GBE_TXHOOK_ORDER 0
@@ -295,8 +296,6 @@ struct xgbe_hw_stats {
u32 rx_dma_overruns;
};
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
struct gbenu_ss_regs {
u32 id_ver;
u32 synce_count; /* NU */
@@ -480,7 +479,6 @@ struct gbenu_hw_stats {
u32 tx_pri7_drop_bcnt;
};
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
#define GBENU_HW_STATS_REG_MAP_SZ 0x200
struct gbe_ss_regs {
@@ -615,7 +613,6 @@ struct gbe_hw_stats {
u32 rx_dma_overruns;
};
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
#define GBE_MAX_HW_STAT_MODS 9
#define GBE_HW_STATS_REG_MAP_SZ 0x100
@@ -646,6 +643,7 @@ struct gbe_priv {
bool enable_ale;
u8 max_num_slaves;
u8 max_num_ports; /* max_num_slaves + 1 */
+ u8 num_stats_mods;
struct netcp_tx_pipe tx_pipe;
int host_port;
@@ -675,6 +673,7 @@ struct gbe_priv {
struct net_device *dummy_ndev;
u64 *hw_stats;
+ u32 *hw_stats_prev;
const struct netcp_ethtool_stat *et_stats;
int num_et_stats;
/* Lock for updating the hwstats */
@@ -874,7 +873,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
};
/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE 33
+#define GBENU_ET_STATS_HOST_SIZE 52
#define GBENU_STATS_HOST(field) \
{ \
@@ -883,8 +882,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
offsetof(struct gbenu_hw_stats, field) \
}
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE 46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE 65
#define GBENU_STATS_P1(field) \
{ \
@@ -976,7 +975,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_HOST(ale_unknown_mcast_bytes),
GBENU_STATS_HOST(ale_unknown_bcast),
GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+ GBENU_STATS_HOST(ale_pol_match),
+ GBENU_STATS_HOST(ale_pol_match_red),
+ GBENU_STATS_HOST(ale_pol_match_yellow),
GBENU_STATS_HOST(tx_mem_protect_err),
+ GBENU_STATS_HOST(tx_pri0_drop),
+ GBENU_STATS_HOST(tx_pri1_drop),
+ GBENU_STATS_HOST(tx_pri2_drop),
+ GBENU_STATS_HOST(tx_pri3_drop),
+ GBENU_STATS_HOST(tx_pri4_drop),
+ GBENU_STATS_HOST(tx_pri5_drop),
+ GBENU_STATS_HOST(tx_pri6_drop),
+ GBENU_STATS_HOST(tx_pri7_drop),
+ GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri7_drop_bcnt),
/* GBENU Module 1 */
GBENU_STATS_P1(rx_good_frames),
GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1041,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P1(ale_unknown_mcast_bytes),
GBENU_STATS_P1(ale_unknown_bcast),
GBENU_STATS_P1(ale_unknown_bcast_bytes),
+ GBENU_STATS_P1(ale_pol_match),
+ GBENU_STATS_P1(ale_pol_match_red),
+ GBENU_STATS_P1(ale_pol_match_yellow),
GBENU_STATS_P1(tx_mem_protect_err),
+ GBENU_STATS_P1(tx_pri0_drop),
+ GBENU_STATS_P1(tx_pri1_drop),
+ GBENU_STATS_P1(tx_pri2_drop),
+ GBENU_STATS_P1(tx_pri3_drop),
+ GBENU_STATS_P1(tx_pri4_drop),
+ GBENU_STATS_P1(tx_pri5_drop),
+ GBENU_STATS_P1(tx_pri6_drop),
+ GBENU_STATS_P1(tx_pri7_drop),
+ GBENU_STATS_P1(tx_pri0_drop_bcnt),
+ GBENU_STATS_P1(tx_pri1_drop_bcnt),
+ GBENU_STATS_P1(tx_pri2_drop_bcnt),
+ GBENU_STATS_P1(tx_pri3_drop_bcnt),
+ GBENU_STATS_P1(tx_pri4_drop_bcnt),
+ GBENU_STATS_P1(tx_pri5_drop_bcnt),
+ GBENU_STATS_P1(tx_pri6_drop_bcnt),
+ GBENU_STATS_P1(tx_pri7_drop_bcnt),
/* GBENU Module 2 */
GBENU_STATS_P2(rx_good_frames),
GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1107,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P2(ale_unknown_mcast_bytes),
GBENU_STATS_P2(ale_unknown_bcast),
GBENU_STATS_P2(ale_unknown_bcast_bytes),
+ GBENU_STATS_P2(ale_pol_match),
+ GBENU_STATS_P2(ale_pol_match_red),
+ GBENU_STATS_P2(ale_pol_match_yellow),
GBENU_STATS_P2(tx_mem_protect_err),
+ GBENU_STATS_P2(tx_pri0_drop),
+ GBENU_STATS_P2(tx_pri1_drop),
+ GBENU_STATS_P2(tx_pri2_drop),
+ GBENU_STATS_P2(tx_pri3_drop),
+ GBENU_STATS_P2(tx_pri4_drop),
+ GBENU_STATS_P2(tx_pri5_drop),
+ GBENU_STATS_P2(tx_pri6_drop),
+ GBENU_STATS_P2(tx_pri7_drop),
+ GBENU_STATS_P2(tx_pri0_drop_bcnt),
+ GBENU_STATS_P2(tx_pri1_drop_bcnt),
+ GBENU_STATS_P2(tx_pri2_drop_bcnt),
+ GBENU_STATS_P2(tx_pri3_drop_bcnt),
+ GBENU_STATS_P2(tx_pri4_drop_bcnt),
+ GBENU_STATS_P2(tx_pri5_drop_bcnt),
+ GBENU_STATS_P2(tx_pri6_drop_bcnt),
+ GBENU_STATS_P2(tx_pri7_drop_bcnt),
/* GBENU Module 3 */
GBENU_STATS_P3(rx_good_frames),
GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1173,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P3(ale_unknown_mcast_bytes),
GBENU_STATS_P3(ale_unknown_bcast),
GBENU_STATS_P3(ale_unknown_bcast_bytes),
+ GBENU_STATS_P3(ale_pol_match),
+ GBENU_STATS_P3(ale_pol_match_red),
+ GBENU_STATS_P3(ale_pol_match_yellow),
GBENU_STATS_P3(tx_mem_protect_err),
+ GBENU_STATS_P3(tx_pri0_drop),
+ GBENU_STATS_P3(tx_pri1_drop),
+ GBENU_STATS_P3(tx_pri2_drop),
+ GBENU_STATS_P3(tx_pri3_drop),
+ GBENU_STATS_P3(tx_pri4_drop),
+ GBENU_STATS_P3(tx_pri5_drop),
+ GBENU_STATS_P3(tx_pri6_drop),
+ GBENU_STATS_P3(tx_pri7_drop),
+ GBENU_STATS_P3(tx_pri0_drop_bcnt),
+ GBENU_STATS_P3(tx_pri1_drop_bcnt),
+ GBENU_STATS_P3(tx_pri2_drop_bcnt),
+ GBENU_STATS_P3(tx_pri3_drop_bcnt),
+ GBENU_STATS_P3(tx_pri4_drop_bcnt),
+ GBENU_STATS_P3(tx_pri5_drop_bcnt),
+ GBENU_STATS_P3(tx_pri6_drop_bcnt),
+ GBENU_STATS_P3(tx_pri7_drop_bcnt),
/* GBENU Module 4 */
GBENU_STATS_P4(rx_good_frames),
GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1239,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P4(ale_unknown_mcast_bytes),
GBENU_STATS_P4(ale_unknown_bcast),
GBENU_STATS_P4(ale_unknown_bcast_bytes),
+ GBENU_STATS_P4(ale_pol_match),
+ GBENU_STATS_P4(ale_pol_match_red),
+ GBENU_STATS_P4(ale_pol_match_yellow),
GBENU_STATS_P4(tx_mem_protect_err),
+ GBENU_STATS_P4(tx_pri0_drop),
+ GBENU_STATS_P4(tx_pri1_drop),
+ GBENU_STATS_P4(tx_pri2_drop),
+ GBENU_STATS_P4(tx_pri3_drop),
+ GBENU_STATS_P4(tx_pri4_drop),
+ GBENU_STATS_P4(tx_pri5_drop),
+ GBENU_STATS_P4(tx_pri6_drop),
+ GBENU_STATS_P4(tx_pri7_drop),
+ GBENU_STATS_P4(tx_pri0_drop_bcnt),
+ GBENU_STATS_P4(tx_pri1_drop_bcnt),
+ GBENU_STATS_P4(tx_pri2_drop_bcnt),
+ GBENU_STATS_P4(tx_pri3_drop_bcnt),
+ GBENU_STATS_P4(tx_pri4_drop_bcnt),
+ GBENU_STATS_P4(tx_pri5_drop_bcnt),
+ GBENU_STATS_P4(tx_pri6_drop_bcnt),
+ GBENU_STATS_P4(tx_pri7_drop_bcnt),
/* GBENU Module 5 */
GBENU_STATS_P5(rx_good_frames),
GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1305,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P5(ale_unknown_mcast_bytes),
GBENU_STATS_P5(ale_unknown_bcast),
GBENU_STATS_P5(ale_unknown_bcast_bytes),
+ GBENU_STATS_P5(ale_pol_match),
+ GBENU_STATS_P5(ale_pol_match_red),
+ GBENU_STATS_P5(ale_pol_match_yellow),
GBENU_STATS_P5(tx_mem_protect_err),
+ GBENU_STATS_P5(tx_pri0_drop),
+ GBENU_STATS_P5(tx_pri1_drop),
+ GBENU_STATS_P5(tx_pri2_drop),
+ GBENU_STATS_P5(tx_pri3_drop),
+ GBENU_STATS_P5(tx_pri4_drop),
+ GBENU_STATS_P5(tx_pri5_drop),
+ GBENU_STATS_P5(tx_pri6_drop),
+ GBENU_STATS_P5(tx_pri7_drop),
+ GBENU_STATS_P5(tx_pri0_drop_bcnt),
+ GBENU_STATS_P5(tx_pri1_drop_bcnt),
+ GBENU_STATS_P5(tx_pri2_drop_bcnt),
+ GBENU_STATS_P5(tx_pri3_drop_bcnt),
+ GBENU_STATS_P5(tx_pri4_drop_bcnt),
+ GBENU_STATS_P5(tx_pri5_drop_bcnt),
+ GBENU_STATS_P5(tx_pri6_drop_bcnt),
+ GBENU_STATS_P5(tx_pri7_drop_bcnt),
/* GBENU Module 6 */
GBENU_STATS_P6(rx_good_frames),
GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1371,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P6(ale_unknown_mcast_bytes),
GBENU_STATS_P6(ale_unknown_bcast),
GBENU_STATS_P6(ale_unknown_bcast_bytes),
+ GBENU_STATS_P6(ale_pol_match),
+ GBENU_STATS_P6(ale_pol_match_red),
+ GBENU_STATS_P6(ale_pol_match_yellow),
GBENU_STATS_P6(tx_mem_protect_err),
+ GBENU_STATS_P6(tx_pri0_drop),
+ GBENU_STATS_P6(tx_pri1_drop),
+ GBENU_STATS_P6(tx_pri2_drop),
+ GBENU_STATS_P6(tx_pri3_drop),
+ GBENU_STATS_P6(tx_pri4_drop),
+ GBENU_STATS_P6(tx_pri5_drop),
+ GBENU_STATS_P6(tx_pri6_drop),
+ GBENU_STATS_P6(tx_pri7_drop),
+ GBENU_STATS_P6(tx_pri0_drop_bcnt),
+ GBENU_STATS_P6(tx_pri1_drop_bcnt),
+ GBENU_STATS_P6(tx_pri2_drop_bcnt),
+ GBENU_STATS_P6(tx_pri3_drop_bcnt),
+ GBENU_STATS_P6(tx_pri4_drop_bcnt),
+ GBENU_STATS_P6(tx_pri5_drop_bcnt),
+ GBENU_STATS_P6(tx_pri6_drop_bcnt),
+ GBENU_STATS_P6(tx_pri7_drop_bcnt),
/* GBENU Module 7 */
GBENU_STATS_P7(rx_good_frames),
GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1437,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P7(ale_unknown_mcast_bytes),
GBENU_STATS_P7(ale_unknown_bcast),
GBENU_STATS_P7(ale_unknown_bcast_bytes),
+ GBENU_STATS_P7(ale_pol_match),
+ GBENU_STATS_P7(ale_pol_match_red),
+ GBENU_STATS_P7(ale_pol_match_yellow),
GBENU_STATS_P7(tx_mem_protect_err),
+ GBENU_STATS_P7(tx_pri0_drop),
+ GBENU_STATS_P7(tx_pri1_drop),
+ GBENU_STATS_P7(tx_pri2_drop),
+ GBENU_STATS_P7(tx_pri3_drop),
+ GBENU_STATS_P7(tx_pri4_drop),
+ GBENU_STATS_P7(tx_pri5_drop),
+ GBENU_STATS_P7(tx_pri6_drop),
+ GBENU_STATS_P7(tx_pri7_drop),
+ GBENU_STATS_P7(tx_pri0_drop_bcnt),
+ GBENU_STATS_P7(tx_pri1_drop_bcnt),
+ GBENU_STATS_P7(tx_pri2_drop_bcnt),
+ GBENU_STATS_P7(tx_pri3_drop_bcnt),
+ GBENU_STATS_P7(tx_pri4_drop_bcnt),
+ GBENU_STATS_P7(tx_pri5_drop_bcnt),
+ GBENU_STATS_P7(tx_pri6_drop_bcnt),
+ GBENU_STATS_P7(tx_pri7_drop_bcnt),
/* GBENU Module 8 */
GBENU_STATS_P8(rx_good_frames),
GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1503,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
GBENU_STATS_P8(ale_unknown_mcast_bytes),
GBENU_STATS_P8(ale_unknown_bcast),
GBENU_STATS_P8(ale_unknown_bcast_bytes),
+ GBENU_STATS_P8(ale_pol_match),
+ GBENU_STATS_P8(ale_pol_match_red),
+ GBENU_STATS_P8(ale_pol_match_yellow),
GBENU_STATS_P8(tx_mem_protect_err),
+ GBENU_STATS_P8(tx_pri0_drop),
+ GBENU_STATS_P8(tx_pri1_drop),
+ GBENU_STATS_P8(tx_pri2_drop),
+ GBENU_STATS_P8(tx_pri3_drop),
+ GBENU_STATS_P8(tx_pri4_drop),
+ GBENU_STATS_P8(tx_pri5_drop),
+ GBENU_STATS_P8(tx_pri6_drop),
+ GBENU_STATS_P8(tx_pri7_drop),
+ GBENU_STATS_P8(tx_pri0_drop_bcnt),
+ GBENU_STATS_P8(tx_pri1_drop_bcnt),
+ GBENU_STATS_P8(tx_pri2_drop_bcnt),
+ GBENU_STATS_P8(tx_pri3_drop_bcnt),
+ GBENU_STATS_P8(tx_pri4_drop_bcnt),
+ GBENU_STATS_P8(tx_pri5_drop_bcnt),
+ GBENU_STATS_P8(tx_pri6_drop_bcnt),
+ GBENU_STATS_P8(tx_pri7_drop_bcnt),
};
#define XGBE_STATS0_INFO(field) \
@@ -1554,70 +1724,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
}
}
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+ u32 __iomem *p_stats_entry;
+ int i;
+
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ if (gbe_dev->et_stats[i].type == stats_mod) {
+ p_stats_entry = base + gbe_dev->et_stats[i].offset;
+ gbe_dev->hw_stats[i] = 0;
+ gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+ }
+ }
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+ int et_stats_entry)
{
void __iomem *base = NULL;
- u32 __iomem *p;
- u32 tmp = 0;
+ u32 __iomem *p_stats_entry;
+ u32 curr, delta;
+
+ /* The hw_stats_regs pointers are already
+ * properly set to point to the right base:
+ */
+ base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+ p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+ curr = readl(p_stats_entry);
+ delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+ gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+ gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
int i;
for (i = 0; i < gbe_dev->num_et_stats; i++) {
- base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
- p = base + gbe_dev->et_stats[i].offset;
- tmp = readl(p);
- gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+ gbe_update_hw_stats_entry(gbe_dev, i);
+
if (data)
data[i] = gbe_dev->hw_stats[i];
- /* write-to-decrement:
- * new register value = old register value - write value
- */
- writel(tmp, p);
}
}
-static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+ int stats_mod)
{
- void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
- void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
- u64 *hw_stats = &gbe_dev->hw_stats[0];
- void __iomem *base = NULL;
- u32 __iomem *p;
- u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
- int i, j, pair;
+ u32 val;
- for (pair = 0; pair < 2; pair++) {
- val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+ val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
- if (pair == 0)
- val &= ~GBE_STATS_CD_SEL;
- else
- val |= GBE_STATS_CD_SEL;
+ switch (stats_mod) {
+ case GBE_STATSA_MODULE:
+ case GBE_STATSB_MODULE:
+ val &= ~GBE_STATS_CD_SEL;
+ break;
+ case GBE_STATSC_MODULE:
+ case GBE_STATSD_MODULE:
+ val |= GBE_STATS_CD_SEL;
+ break;
+ default:
+ return;
+ }
- /* make the stat modules visible */
- writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+ /* make the stat module visible */
+ writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
- for (i = 0; i < pair_size; i++) {
- j = pair * pair_size + i;
- switch (gbe_dev->et_stats[j].type) {
- case GBE_STATSA_MODULE:
- case GBE_STATSC_MODULE:
- base = gbe_statsa;
- break;
- case GBE_STATSB_MODULE:
- case GBE_STATSD_MODULE:
- base = gbe_statsb;
- break;
- }
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+ gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+ u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+ int et_entry, j, pair;
+
+ for (pair = 0; pair < 2; pair++) {
+ gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+ GBE_STATSC_MODULE :
+ GBE_STATSA_MODULE));
+
+ for (j = 0; j < half_num_et_stats; j++) {
+ et_entry = pair * half_num_et_stats + j;
+ gbe_update_hw_stats_entry(gbe_dev, et_entry);
- p = base + gbe_dev->et_stats[j].offset;
- tmp = readl(p);
- hw_stats[j] += tmp;
if (data)
- data[j] = hw_stats[j];
- /* write-to-decrement:
- * new register value = old register value - write value
- */
- writel(tmp, p);
+ data[et_entry] = gbe_dev->hw_stats[et_entry];
}
}
}
@@ -1801,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
return;
if (!SLAVE_LINK_IS_XGMII(slave)) {
- if (gbe_dev->ss_version == GBE_SS_VERSION_14)
- sgmii_link_state =
- netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
- else
- sgmii_link_state =
- netcp_sgmii_get_port_link(
- gbe_dev->sgmii_port_regs, sp);
+ sgmii_link_state =
+ netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
}
phy_link_state = gbe_phy_link_status(slave);
@@ -1904,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
static void gbe_sgmii_rtreset(struct gbe_priv *priv,
struct gbe_slave *slave, bool set)
{
- void __iomem *sgmii_port_regs;
-
if (SLAVE_LINK_IS_XGMII(slave))
return;
- if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
- sgmii_port_regs = priv->sgmii_port34_regs;
- else
- sgmii_port_regs = priv->sgmii_port_regs;
-
- netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+ netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
+ slave->slave_num, set);
}
static void gbe_slave_stop(struct gbe_intf *intf)
@@ -1940,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf)
static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
- void __iomem *sgmii_port_regs;
-
- sgmii_port_regs = priv->sgmii_port_regs;
- if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
- sgmii_port_regs = priv->sgmii_port34_regs;
+ if (SLAVE_LINK_IS_XGMII(slave))
+ return;
- if (!SLAVE_LINK_IS_XGMII(slave)) {
- netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
- netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
- slave->link_interface);
- }
+ netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
+ netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
+ slave->link_interface);
}
static int gbe_slave_open(struct gbe_intf *gbe_intf)
@@ -2207,14 +2388,15 @@ static void netcp_ethss_timer(unsigned long arg)
netcp_ethss_update_link_state(gbe_dev, slave, NULL);
}
- spin_lock_bh(&gbe_dev->hw_stats_lock);
+ /* A timer runs as a BH, no need to block them */
+ spin_lock(&gbe_dev->hw_stats_lock);
if (gbe_dev->ss_version == GBE_SS_VERSION_14)
gbe_update_stats_ver14(gbe_dev, NULL);
else
gbe_update_stats(gbe_dev, NULL);
- spin_unlock_bh(&gbe_dev->hw_stats_lock);
+ spin_unlock(&gbe_dev->hw_stats_lock);
gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
add_timer(&gbe_dev->timer);
@@ -2455,8 +2637,10 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
mac_phy_link = true;
slave->open = true;
- if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+ of_node_put(port);
break;
+ }
}
/* of_phy_connect() is needed only for MAC-PHY interface */
@@ -2571,15 +2755,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
}
gbe_dev->xgbe_serdes_regs = regs;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = xgbe10_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- XGBE10_NUM_STAT_ENTRIES *
- (gbe_dev->max_num_ports) * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
gbe_dev->ss_version = XGBE_SS_VERSION_10;
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
XGBE10_SGMII_MODULE_OFFSET;
@@ -2593,8 +2790,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = xgbe10_et_stats;
- gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
@@ -2679,30 +2874,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
}
gbe_dev->switch_regs = regs;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+ gbe_dev->et_stats = gbe13_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- GBE13_NUM_HW_STAT_ENTRIES *
- gbe_dev->max_num_slaves * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
+ /* K2HK has only 2 hw stats modules visible at a time, so
+ * module 0 & 2 points to one base and
+ * module 1 & 3 points to the other base
+ */
for (i = 0; i < gbe_dev->max_num_slaves; i++) {
gbe_dev->hw_stats_regs[i] =
gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
- (GBE_HW_STATS_REG_MAP_SZ * i);
+ (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
}
gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBE13_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = gbe13_et_stats;
- gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
/* Subsystem registers */
@@ -2729,15 +2939,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
void __iomem *regs;
int i, ret;
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = gbenu_et_stats;
+
+ if (IS_SS_ID_NU(gbe_dev))
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+ else
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ GBENU_ET_STATS_PORT_SIZE;
+
gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
- GBENU_NUM_HW_STAT_ENTRIES *
- (gbe_dev->max_num_ports) * sizeof(u64),
- GFP_KERNEL);
+ gbe_dev->num_et_stats * sizeof(u64),
+ GFP_KERNEL);
if (!gbe_dev->hw_stats) {
dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
return -ENOMEM;
}
+ gbe_dev->hw_stats_prev =
+ devm_kzalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats * sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
@@ -2755,6 +2984,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->switch_regs = regs;
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+
+ /* Although sgmii modules are mem mapped to one contiguous
+ * region on GBENU devices, setting sgmii_port34_regs allows
+ * consistent code when accessing sgmii api
+ */
+ gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
+ (2 * GBENU_SGMII_MODULE_SIZE);
+
gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
for (i = 0; i < (gbe_dev->max_num_ports); i++)
@@ -2765,16 +3002,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBENU_HOST_PORT_NUM;
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
- gbe_dev->et_stats = gbenu_et_stats;
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
- if (IS_SS_ID_NU(gbe_dev))
- gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
- (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
- else
- gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
- GBENU_ET_STATS_PORT_SIZE;
-
/* Subsystem registers */
GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
@@ -2804,7 +3033,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
struct cpsw_ale_params ale_params;
struct gbe_priv *gbe_dev;
u32 slave_num;
- int ret = 0;
+ int i, ret = 0;
if (!node) {
dev_err(dev, "device tree info unavailable\n");
@@ -2910,8 +3139,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
continue;
}
gbe_dev->num_slaves++;
- if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+ of_node_put(interface);
break;
+ }
}
of_node_put(interfaces);
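
The added of_node_put(interface) matters because for_each_child_of_node() holds a reference on the node it yields and only drops it when the loop advances; an early break leaves that reference with whoever broke out. A general sketch of the pattern (the compatible string is hypothetical):

	#include <linux/of.h>

	static struct device_node *first_matching_child(struct device_node *parent)
	{
		struct device_node *child;

		for_each_child_of_node(parent, child) {
			if (of_device_is_compatible(child, "vendor,example-slave"))
				return child;	/* ref still held; caller puts it */
		}
		return NULL;	/* loop ran to completion; nothing to put */
	}

In the hunk above the loop does not hand the node out, so the put happens right at the break site instead.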
@@ -2951,6 +3182,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
/* initialize host port */
gbe_init_host_port(gbe_dev);
+ spin_lock_bh(&gbe_dev->hw_stats_lock);
+ for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+ if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+ gbe_reset_mod_stats_ver14(gbe_dev, i);
+ else
+ gbe_reset_mod_stats(gbe_dev, i);
+ }
+ spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
init_timer(&gbe_dev->timer);
gbe_dev->timer.data = (unsigned long)gbe_dev;
gbe_dev->timer.function = netcp_ethss_timer;
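
The stats-reset loop added to gbe_probe() takes hw_stats_lock with the _bh variant because the same lock also serializes the stats collection run from netcp_ethss_timer() in softirq context. A generic sketch of that locking shape (structure and names hypothetical):

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_stats {
		spinlock_t lock;	/* also taken from a timer callback */
		u64 counters[16];
	};

	static void demo_stats_reset(struct demo_stats *s)
	{
		int i;

		/* _bh: keep the softirq timer from running here mid-reset */
		spin_lock_bh(&s->lock);
		for (i = 0; i < 16; i++)
			s->counters[i] = 0;
		spin_unlock_bh(&s->lock);
	}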
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 691ec936e88d..a274cd49afe9 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -791,7 +791,6 @@ static void tlan_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
else
strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
- info->eedump_len = TLAN_EEPROM_SIZE;
}
static int tlan_get_eeprom_len(struct net_device *dev)
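
The deleted eedump_len assignment is dead code rather than a behavior change: the ethtool core fills info->eedump_len itself by calling the driver's get_eeprom_len() callback, which tlan already provides. A sketch of the wiring, with the ops table partly assumed and elided:

	static int tlan_get_eeprom_len(struct net_device *dev)
	{
		return TLAN_EEPROM_SIZE;
	}

	static const struct ethtool_ops tlan_ethtool_ops = {
		.get_drvinfo	= tlan_get_drvinfo,
		.get_eeprom_len	= tlan_get_eeprom_len,	/* core fills eedump_len */
		/* ... */
	};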
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index a3f7610002aa..0a15acc075b3 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -40,6 +40,7 @@
#include <linux/tcp.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/tick.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -2273,7 +2274,8 @@ static int __init tile_net_init_module(void)
tile_net_dev_init(name, mac);
if (!network_cpus_init())
- network_cpus_map = *cpu_online_mask;
+ cpumask_and(&network_cpus_map, housekeeping_cpumask(),
+ cpu_online_mask);
return 0;
}
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 2f1264b882b9..d3d094742a7e 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on (PCI || OF_IRQ)
+ depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
depends on HAS_DMA
select CRC32
select MII
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index a83263743665..2b7550c43f78 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit)
}
skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, dev);
rhine_rx_vlan_tag(skb, desc, data_size);
+ skb->protocol = eth_type_trans(skb, dev);
+
netif_receive_skb(skb);
u64_stats_update_begin(&rp->rx_stats.syncp);
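
One reading of the via-rhine reorder is that the hardware-extracted VLAN tag should be attached before eth_type_trans() computes skb->protocol, so the skb handed to the stack is already in its final shape. A sketch of that completion order (function and parameter names hypothetical):

	#include <linux/etherdevice.h>
	#include <linux/if_vlan.h>
	#include <linux/netdevice.h>

	static void demo_rx_deliver(struct net_device *dev, struct sk_buff *skb,
				    bool tagged, u16 vid)
	{
		/* tag from the RX descriptor, if the NIC stripped one */
		if (tagged)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

		skb->protocol = eth_type_trans(skb, dev);	/* sets skb->dev too */
		netif_receive_skb(skb);
	}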
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 8cf9d4f56bb2..415de1eaf641 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -59,16 +59,15 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
{
struct mii_bus *bus;
- const u32 *bus_hz;
+ u32 bus_hz;
int clk_div;
- int rc, size;
+ int rc;
struct resource res;
/* Calculate a reasonable divisor for the clock rate */
clk_div = 0x3f; /* worst-case default setting */
- bus_hz = of_get_property(np, "clock-frequency", &size);
- if (bus_hz && size >= sizeof(*bus_hz)) {
- clk_div = (*bus_hz) / (2500 * 1000 * 2) - 1;
+ if (of_property_read_u32(np, "clock-frequency", &bus_hz) == 0) {
+ clk_div = bus_hz / (2500 * 1000 * 2) - 1;
if (clk_div < 1)
clk_div = 1;
if (clk_div > 0x3f)
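
The ll_temac conversion collapses the of_get_property() call, the size check, and the raw pointer dereference into one of_property_read_u32(), which also handles the endian swap. The resulting shape, reduced to a helper whose clamping mirrors the hunk:

	#include <linux/kernel.h>
	#include <linux/of.h>

	static int temac_mdio_clk_div(struct device_node *np)
	{
		u32 bus_hz;
		int clk_div = 0x3f;	/* worst-case default setting */

		/* returns 0 on success; bus_hz arrives in CPU byte order */
		if (!of_property_read_u32(np, "clock-frequency", &bus_hz))
			clk_div = clamp((int)(bus_hz / (2500 * 1000 * 2)) - 1,
					1, 0x3f);
		return clk_div;
	}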
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d95f9aae95e7..4684644703cc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1135,7 +1135,6 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
{
strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
- ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2a5a16834c01..507bbb0355c2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -129,7 +129,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
{
int ret;
u32 clk_div, host_clock;
- u32 *property_p;
struct mii_bus *bus;
struct resource res;
struct device_node *np1;
@@ -168,8 +167,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
clk_div = DEFAULT_CLOCK_DIVISOR;
goto issue;
}
- property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
- if (!property_p) {
+ if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
netdev_warn(lp->ndev, "clock-frequency property not found.\n");
netdev_warn(lp->ndev,
"Setting MDIO clock divisor to default %d\n",
@@ -179,7 +177,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
goto issue;
}
- host_clock = be32_to_cpup(property_p);
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
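
The comment this hunk cuts off describes round-up division: if fHOST / (MAX_MDIO_FREQ * 2) leaves a remainder, the divisor must grow by one so the MDIO clock stays at or below its maximum. DIV_ROUND_UP() expresses the same thing in one step; an equivalent sketch (the 2.5 MHz figure is the usual MDIO ceiling, assumed here):

	#include <linux/kernel.h>

	#define MAX_MDIO_FREQ	2500000	/* 2.5 MHz, assumed MDIO maximum */

	static u32 axienet_mdio_clk_divisor(u32 host_clock)
	{
		/* smallest divisor with
		 * host_clock / (divisor + 1) <= 2 * MAX_MDIO_FREQ */
		return DIV_ROUND_UP(host_clock, MAX_MDIO_FREQ * 2) - 1;
	}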
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 6008eee01a33..cf468c87ce57 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
if (!phydev)
dev_info(dev,
"MDIO of the phy is not registered yet\n");
+ else
+ put_device(&phydev->dev);
return 0;
}
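
The xemaclite fix closes a reference leak: of_phy_find_device() returns its phy_device with a reference held on the embedded struct device, and a caller that only wanted an existence check must drop it. The pattern as a sketch (in this era the embedded device is phydev->dev; later kernels moved it to phydev->mdio.dev):

	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static bool phy_node_registered(struct device_node *phy_np)
	{
		struct phy_device *phydev = of_phy_find_device(phy_np);

		if (!phydev)
			return false;

		put_device(&phydev->dev);	/* balance of_phy_find_device()'s get */
		return true;
	}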